/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * Returns the time in ktime_t format.
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * Returns the time in ktime_t format.
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);

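/*
 * Illustrative usage (not part of this file): ktime_t values from the
 * accessors above combine with the ktime_* helpers from <linux/ktime.h>,
 * e.g. to compute an absolute expiry 100ms in the future on the
 * monotonic clock, as HRTIMER_MODE_ABS timers expect:
 *
 *	ktime_t expiry = ktime_add_ns(ktime_get(), 100 * NSEC_PER_MSEC);
 */
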
/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock ids.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = current_kernel_time();
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}


/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
		int preferred_cpu = get_nohz_load_balancer();

		if (preferred_cpu >= 0)
			return preferred_cpu;
	}
#endif
	return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			spin_unlock(&new_base->cpu_base->lock);
			spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif	/* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
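
/*
 * Worked example (illustrative): kt = 10^10 ns and div = 6 * 10^9.
 * 6 * 10^9 does not fit in 32 bits, so one shift is done: sft = 1,
 * div = 3 * 10^9, dclc = 5 * 10^9. do_div() then yields 1, the same
 * truncated quotient as 10^10 / (6 * 10^9); halving both operands
 * preserves the quotient up to truncation error.
 */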
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);

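/*
 * Example: booting with "highres=off" on the kernel command line
 * disables high resolution mode; "highres=on" keeps the default,
 * which enables it whenever the hardware supports it.
 */
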
/*
 * hrtimer_is_hres_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source after checking both queues for the
 * next event.
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires;

	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < cpu_base->expires_next.tv64)
			cpu_base->expires_next = expires;
	}

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid calling into the tick code, which
	 * now has objections against negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME, when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}


/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base);
	spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}


/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
		if (wakeup) {
			spin_unlock(&base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			spin_lock(&base->cpu_base->lock);
		} else
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);

		return 1;
	}

	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

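/*
 * Note: the start_site/start_comm/start_pid fields recorded here feed
 * the timer statistics interface (/proc/timer_stats with
 * CONFIG_TIMER_STATS), which attributes timer activity to the code
 * address and task that started each timer.
 */
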
/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);

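/*
 * Illustrative sketch (not built, names are made up): the canonical way
 * to make a periodic timer is to forward it from its own expiry callback
 * and ask for a restart. hrtimer_forward() pushes the expiry ahead in
 * whole intervals, so missed periods are skipped rather than replayed.
 */
#if 0
static enum hrtimer_restart example_tick(struct hrtimer *timer)
{
	/* Re-arm 100ms after the previous expiry, skipping overruns: */
	hrtimer_forward(timer, timer->base->get_time(),
			ktime_set(0, 100 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}
#endif
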
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	debug_hrtimer_activate(timer);

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (hrtimer_get_expires_tv64(timer) <
				hrtimer_get_expires_tv64(entry)) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost)
		base->first = &timer->node;

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return leftmost;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	if (timer->state & HRTIMER_STATE_ENQUEUED) {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_hrtimer_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}

int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode,
		int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
		hrtimer_enqueue_reprogram(timer, new_base, wakeup);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			unsigned long delta_ns, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);


/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;

}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	INIT_LIST_HEAD(&timer->cb_entry);
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

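/*
 * Illustrative sketch (not built, names are made up): arming a one-shot
 * timer against the monotonic clock. The function pointer is assigned
 * after hrtimer_init(), which clears the whole struct. Relative expiry
 * values are made absolute inside __hrtimer_start_range_ns().
 */
#if 0
static struct hrtimer example_timer;

static void example_arm(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_tick;	/* see the sketch above */
	/* Fire 500us from now: */
	hrtimer_start(&example_timer, ktime_set(0, 500 * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}
#endif
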
/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void __run_hrtimer(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_hrtimer_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	spin_unlock(&cpu_base->lock);
	restart = fn(timer);
	spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}
	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

static int force_clock_reprogram;

/*
 * After 5 retries we consider that hrtimer_interrupt() is hanging,
 * which can happen when something (e.g. tracing) slows down the
 * interrupt. We then force clock reprogramming for each future
 * hrtimer interrupt to avoid infinite loops, and we overwrite the
 * device's min_delta_ns threshold accordingly.
 * The next tick event is scheduled at 3 times the duration we
 * currently spend in hrtimer_interrupt(). This is a good compromise:
 * the cpus will spend at most 1/4 of their time processing hrtimer
 * interrupts, which is enough to let the system run without serious
 * starvation.
 */

static inline void
hrtimer_interrupt_hanging(struct clock_event_device *dev,
			ktime_t try_time)
{
	force_clock_reprogram = 1;
	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
	printk(KERN_WARNING "hrtimer: interrupt too slow, "
		"forcing clock min delta to %lu ns\n", dev->min_delta_ns);
}
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int nr_retries = 0;
	int i;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	/* 5 retries is enough to notice a hang */
	if (!(++nr_retries % 5))
		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));

	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	spin_lock(&cpu_base->lock);
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer);
		}
		base++;
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, force_clock_reprogram))
			goto retry;
	}
}

/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = &__get_cpu_var(tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

/**
 * hrtimer_peek_ahead_timers -- run soft-expired timers now
 *
 * hrtimer_peek_ahead_timers will peek at the timer queue of
 * the current cpu and check if there are any timers for which
 * the soft expires time has passed. If any such timers exist,
 * they are run immediately and then removed from the timer queue.
 *
 */
void hrtimer_peek_ahead_timers(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__hrtimer_peek_ahead_timers();
	local_irq_restore(flags);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	hrtimer_peek_ahead_timers();
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif	/* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();
}

/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct rb_node *node;
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	int index, gettime = 1;

	if (hrtimer_hres_active())
		return;

	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
		base = &cpu_base->clock_base[index];

		if (!base->first)
			continue;

		if (gettime) {
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}

		spin_lock(&cpu_base->lock);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);
			if (base->softirq_time.tv64 <=
					hrtimer_get_expires_tv64(timer))
				break;

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
	}
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

080344b9 ON |
1506 | static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp) |
1507 | { | |
1508 | struct timespec rmt; | |
1509 | ktime_t rem; | |
1510 | ||
cc584b21 | 1511 | rem = hrtimer_expires_remaining(timer); |
080344b9 ON |
1512 | if (rem.tv64 <= 0) |
1513 | return 0; | |
1514 | rmt = ktime_to_timespec(rem); | |
1515 | ||
1516 | if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) | |
1517 | return -EFAULT; | |
1518 | ||
1519 | return 1; | |
1520 | } | |
1521 | ||
1711ef38 | 1522 | long __sched hrtimer_nanosleep_restart(struct restart_block *restart) |
10c94ec1 | 1523 | { |
669d7868 | 1524 | struct hrtimer_sleeper t; |
080344b9 | 1525 | struct timespec __user *rmtp; |
237fc6e7 | 1526 | int ret = 0; |
10c94ec1 | 1527 | |
237fc6e7 TG |
1528 | hrtimer_init_on_stack(&t.timer, restart->nanosleep.index, |
1529 | HRTIMER_MODE_ABS); | |
cc584b21 | 1530 | hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); |
10c94ec1 | 1531 | |
c9cb2e3d | 1532 | if (do_nanosleep(&t, HRTIMER_MODE_ABS)) |
237fc6e7 | 1533 | goto out; |
10c94ec1 | 1534 | |
029a07e0 | 1535 | rmtp = restart->nanosleep.rmtp; |
432569bb | 1536 | if (rmtp) { |
237fc6e7 | 1537 | ret = update_rmtp(&t.timer, rmtp); |
080344b9 | 1538 | if (ret <= 0) |
237fc6e7 | 1539 | goto out; |
432569bb | 1540 | } |
10c94ec1 | 1541 | |
10c94ec1 | 1542 | /* The other values in restart are already filled in */ |
237fc6e7 TG |
1543 | ret = -ERESTART_RESTARTBLOCK; |
1544 | out: | |
1545 | destroy_hrtimer_on_stack(&t.timer); | |
1546 | return ret; | |
10c94ec1 TG |
1547 | } |
1548 | ||
080344b9 | 1549 | long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, |
10c94ec1 TG |
1550 | const enum hrtimer_mode mode, const clockid_t clockid) |
1551 | { | |
1552 | struct restart_block *restart; | |
669d7868 | 1553 | struct hrtimer_sleeper t; |
237fc6e7 | 1554 | int ret = 0; |
3bd01206 AV |
1555 | unsigned long slack; |
1556 | ||
1557 | slack = current->timer_slack_ns; | |
1558 | if (rt_task(current)) | |
1559 | slack = 0; | |
10c94ec1 | 1560 | |
237fc6e7 | 1561 | hrtimer_init_on_stack(&t.timer, clockid, mode); |
3bd01206 | 1562 | hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); |
432569bb | 1563 | if (do_nanosleep(&t, mode)) |
237fc6e7 | 1564 | goto out; |
10c94ec1 | 1565 | |
7978672c | 1566 | /* Absolute timers neither update the rmtp value nor restart: */ |
237fc6e7 TG |
1567 | if (mode == HRTIMER_MODE_ABS) { |
1568 | ret = -ERESTARTNOHAND; | |
1569 | goto out; | |
1570 | } | |
10c94ec1 | 1571 | |
432569bb | 1572 | if (rmtp) { |
237fc6e7 | 1573 | ret = update_rmtp(&t.timer, rmtp); |
080344b9 | 1574 | if (ret <= 0) |
237fc6e7 | 1575 | goto out; |
432569bb | 1576 | } |
10c94ec1 TG |
1577 | |
1578 | restart = &current_thread_info()->restart_block; |
1711ef38 | 1579 | restart->fn = hrtimer_nanosleep_restart; |
029a07e0 TG |
1580 | restart->nanosleep.index = t.timer.base->index; |
1581 | restart->nanosleep.rmtp = rmtp; | |
cc584b21 | 1582 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); |
10c94ec1 | 1583 | |
237fc6e7 TG |
1584 | ret = -ERESTART_RESTARTBLOCK; |
1585 | out: | |
1586 | destroy_hrtimer_on_stack(&t.timer); | |
1587 | return ret; | |
10c94ec1 TG |
1588 | } |
1589 | ||
58fd3aa2 HC |
1590 | SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, |
1591 | struct timespec __user *, rmtp) | |
6ba1b912 | 1592 | { |
080344b9 | 1593 | struct timespec tu; |
6ba1b912 TG |
1594 | |
1595 | if (copy_from_user(&tu, rqtp, sizeof(tu))) | |
1596 | return -EFAULT; | |
1597 | ||
1598 | if (!timespec_valid(&tu)) | |
1599 | return -EINVAL; | |
1600 | ||
080344b9 | 1601 | return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); |
6ba1b912 TG |
1602 | } |
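/*
 * For reference, a userspace sketch of how the rmtp remainder is
 * typically consumed after an interrupted sleep (standard POSIX usage;
 * the helper name is an illustrative assumption):
 *
 *	#include <time.h>
 *	#include <errno.h>
 *
 *	static void sleep_fully(struct timespec req)
 *	{
 *		struct timespec rem;
 *
 *		// Resume with the remainder until the full interval
 *		// has elapsed or a real error occurs.
 *		while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *			req = rem;
 *	}
 */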
1603 | ||
c0a31329 TG |
1604 | /* |
1605 | * Functions related to boot-time initialization: | |
1606 | */ | |
0ec160dd | 1607 | static void __cpuinit init_hrtimers_cpu(int cpu) |
c0a31329 | 1608 | { |
3c8aa39d | 1609 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
c0a31329 TG |
1610 | int i; |
1611 | ||
3c8aa39d | 1612 | spin_lock_init(&cpu_base->lock); |
3c8aa39d TG |
1613 | |
1614 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | |
1615 | cpu_base->clock_base[i].cpu_base = cpu_base; | |
1616 | ||
54cdfdb4 | 1617 | hrtimer_init_hres(cpu_base); |
c0a31329 TG |
1618 | } |
1619 | ||
1620 | #ifdef CONFIG_HOTPLUG_CPU | |
1621 | ||
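/*
 * Move all pending timers from the dead CPU's clock base over to the
 * current CPU's base. Called with both cpu_base locks held.
 */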
ca109491 | 1622 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, |
37810659 | 1623 | struct hrtimer_clock_base *new_base) |
c0a31329 TG |
1624 | { |
1625 | struct hrtimer *timer; | |
1626 | struct rb_node *node; | |
1627 | ||
1628 | while ((node = rb_first(&old_base->active))) { | |
1629 | timer = rb_entry(node, struct hrtimer, node); | |
54cdfdb4 | 1630 | BUG_ON(hrtimer_callback_running(timer)); |
237fc6e7 | 1631 | debug_hrtimer_deactivate(timer); |
b00c1a99 TG |
1632 | |
1633 | /* | |
1634 | * Mark it as STATE_MIGRATE, not INACTIVE, otherwise the |
1635 | * timer could be seen as !active and just vanish away |
1636 | * under us on another CPU. |
1637 | */ | |
1638 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0); | |
c0a31329 | 1639 | timer->base = new_base; |
54cdfdb4 | 1640 | /* |
e3f1d883 TG |
1641 | * Enqueue the timers on the new CPU. This does not |
1642 | * reprogram the event device even if a timer expires |
1643 | * before the earliest one on this CPU; we run |
1644 | * hrtimer_interrupt after everything has been migrated |
1645 | * to sort out already-expired timers and reprogram the |
1646 | * event device. |
54cdfdb4 | 1647 | */ |
a6037b61 | 1648 | enqueue_hrtimer(timer, new_base); |
41e1022e | 1649 | |
b00c1a99 TG |
1650 | /* Clear the migration state bit */ |
1651 | timer->state &= ~HRTIMER_STATE_MIGRATE; | |
c0a31329 TG |
1652 | } |
1653 | } | |
1654 | ||
d5fd43c4 | 1655 | static void migrate_hrtimers(int scpu) |
c0a31329 | 1656 | { |
3c8aa39d | 1657 | struct hrtimer_cpu_base *old_base, *new_base; |
731a55ba | 1658 | int i; |
c0a31329 | 1659 | |
37810659 | 1660 | BUG_ON(cpu_online(scpu)); |
37810659 | 1661 | tick_cancel_sched_timer(scpu); |
731a55ba TG |
1662 | |
1663 | local_irq_disable(); | |
1664 | old_base = &per_cpu(hrtimer_bases, scpu); | |
1665 | new_base = &__get_cpu_var(hrtimer_bases); | |
d82f0b0f ON |
1666 | /* |
1667 | * The caller is globally serialized and nobody else | |
1668 | * takes two locks at once, so deadlock is not possible. |
1669 | */ | |
731a55ba | 1670 | spin_lock(&new_base->lock); |
8e60e05f | 1671 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
c0a31329 | 1672 | |
3c8aa39d | 1673 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
ca109491 | 1674 | migrate_hrtimer_list(&old_base->clock_base[i], |
37810659 | 1675 | &new_base->clock_base[i]); |
c0a31329 TG |
1676 | } |
1677 | ||
8e60e05f | 1678 | spin_unlock(&old_base->lock); |
731a55ba | 1679 | spin_unlock(&new_base->lock); |
37810659 | 1680 | |
731a55ba TG |
1681 | /* Check whether we have expired work to do */ |
1682 | __hrtimer_peek_ahead_timers(); | |
1683 | local_irq_enable(); | |
c0a31329 | 1684 | } |
37810659 | 1685 | |
c0a31329 TG |
1686 | #endif /* CONFIG_HOTPLUG_CPU */ |
1687 | ||
8c78f307 | 1688 | static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, |
c0a31329 TG |
1689 | unsigned long action, void *hcpu) |
1690 | { | |
b2e3c0ad | 1691 | int scpu = (long)hcpu; |
c0a31329 TG |
1692 | |
1693 | switch (action) { | |
1694 | ||
1695 | case CPU_UP_PREPARE: | |
8bb78442 | 1696 | case CPU_UP_PREPARE_FROZEN: |
37810659 | 1697 | init_hrtimers_cpu(scpu); |
c0a31329 TG |
1698 | break; |
1699 | ||
1700 | #ifdef CONFIG_HOTPLUG_CPU | |
94df7de0 SD |
1701 | case CPU_DYING: |
1702 | case CPU_DYING_FROZEN: | |
1703 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu); | |
1704 | break; | |
c0a31329 | 1705 | case CPU_DEAD: |
8bb78442 | 1706 | case CPU_DEAD_FROZEN: |
b2e3c0ad | 1707 | { |
37810659 | 1708 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu); |
d5fd43c4 | 1709 | migrate_hrtimers(scpu); |
c0a31329 | 1710 | break; |
b2e3c0ad | 1711 | } |
c0a31329 TG |
1712 | #endif |
1713 | ||
1714 | default: | |
1715 | break; | |
1716 | } | |
1717 | ||
1718 | return NOTIFY_OK; | |
1719 | } | |
1720 | ||
8c78f307 | 1721 | static struct notifier_block __cpuinitdata hrtimers_nb = { |
c0a31329 TG |
1722 | .notifier_call = hrtimer_cpu_notify, |
1723 | }; | |
1724 | ||
1725 | void __init hrtimers_init(void) | |
1726 | { | |
1727 | hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, | |
1728 | (void *)(long)smp_processor_id()); | |
1729 | register_cpu_notifier(&hrtimers_nb); | |
a6037b61 PZ |
1730 | #ifdef CONFIG_HIGH_RES_TIMERS |
1731 | open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); | |
1732 | #endif | |
c0a31329 TG |
1733 | } |
1734 | ||
7bb67439 | 1735 | /** |
654c8e0b | 1736 | * schedule_hrtimeout_range - sleep until timeout |
7bb67439 | 1737 | * @expires: timeout value (ktime_t) |
654c8e0b | 1738 | * @delta: slack in expires timeout (ktime_t) |
7bb67439 AV |
1739 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL |
1740 | * | |
1741 | * Make the current task sleep until the given expiry time has | |
1742 | * elapsed. The routine will return immediately unless | |
1743 | * the current task state has been set (see set_current_state()). | |
1744 | * | |
654c8e0b AV |
1745 | * The @delta argument gives the kernel the freedom to schedule the |
1746 | * actual wakeup to a time that is both power and performance friendly. | |
1747 | * The kernel gives normal best-effort behavior for "@expires+@delta", |
1748 | * and may decide to fire the timer earlier, but never before @expires. |
1749 | * | |
7bb67439 AV |
1750 | * You can set the task state as follows - |
1751 | * | |
1752 | * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to |
1753 | * pass before the routine returns. | |
1754 | * | |
1755 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
1756 | * delivered to the current task. | |
1757 | * | |
1758 | * The current task state is guaranteed to be TASK_RUNNING when this | |
1759 | * routine returns. | |
1760 | * | |
1761 | * Returns 0 when the timer has expired, otherwise -EINTR. |
1762 | */ | |
654c8e0b | 1763 | int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, |
7bb67439 AV |
1764 | const enum hrtimer_mode mode) |
1765 | { | |
1766 | struct hrtimer_sleeper t; | |
1767 | ||
1768 | /* | |
1769 | * Optimize when a zero timeout value is given. It does not | |
1770 | * matter whether this is an absolute or a relative time. | |
1771 | */ | |
1772 | if (expires && !expires->tv64) { | |
1773 | __set_current_state(TASK_RUNNING); | |
1774 | return 0; | |
1775 | } | |
1776 | ||
1777 | /* | |
1778 | * A NULL parameter means "infinite" |
1779 | */ | |
1780 | if (!expires) { | |
1781 | schedule(); | |
1782 | __set_current_state(TASK_RUNNING); | |
1783 | return -EINTR; | |
1784 | } | |
1785 | ||
1786 | hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode); | |
654c8e0b | 1787 | hrtimer_set_expires_range_ns(&t.timer, *expires, delta); |
7bb67439 AV |
1788 | |
1789 | hrtimer_init_sleeper(&t, current); | |
1790 | ||
cc584b21 | 1791 | hrtimer_start_expires(&t.timer, mode); |
7bb67439 AV |
1792 | if (!hrtimer_active(&t.timer)) |
1793 | t.task = NULL; | |
1794 | ||
1795 | if (likely(t.task)) | |
1796 | schedule(); | |
1797 | ||
1798 | hrtimer_cancel(&t.timer); | |
1799 | destroy_hrtimer_on_stack(&t.timer); | |
1800 | ||
1801 | __set_current_state(TASK_RUNNING); | |
1802 | ||
1803 | return !t.task ? 0 : -EINTR; | |
1804 | } | |
654c8e0b AV |
1805 | EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); |
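/*
 * A minimal usage sketch; the helper name and the timeout values are
 * illustrative assumptions. Note that the caller must set the task
 * state first, as documented above.
 */
static int example_wait_100us(void)
{
	ktime_t to = ktime_set(0, 100 * NSEC_PER_USEC);

	/* Sleep ~100us, allowing 10us of slack for wakeup coalescing */
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_hrtimeout_range(&to, 10 * NSEC_PER_USEC,
					HRTIMER_MODE_REL);
}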
1806 | ||
1807 | /** | |
1808 | * schedule_hrtimeout - sleep until timeout | |
1809 | * @expires: timeout value (ktime_t) | |
1810 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | |
1811 | * | |
1812 | * Make the current task sleep until the given expiry time has | |
1813 | * elapsed. The routine will return immediately unless | |
1814 | * the current task state has been set (see set_current_state()). | |
1815 | * | |
1816 | * You can set the task state as follows - | |
1817 | * | |
1818 | * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to |
1819 | * pass before the routine returns. | |
1820 | * | |
1821 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
1822 | * delivered to the current task. | |
1823 | * | |
1824 | * The current task state is guaranteed to be TASK_RUNNING when this | |
1825 | * routine returns. | |
1826 | * | |
1827 | * Returns 0 when the timer has expired, otherwise -EINTR. |
1828 | */ | |
1829 | int __sched schedule_hrtimeout(ktime_t *expires, | |
1830 | const enum hrtimer_mode mode) | |
1831 | { | |
1832 | return schedule_hrtimeout_range(expires, 0, mode); | |
1833 | } | |
7bb67439 | 1834 | EXPORT_SYMBOL_GPL(schedule_hrtimeout); |