/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"

/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.seq = SEQCNT_ZERO(hrtimer_bases.seq),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	/* Make sure we catch unsupported clockids */
	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,

	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 * such that hrtimer_callback_running() can unconditionally dereference
 * timer->base->cpu_base
 */
static struct hrtimer_cpu_base migration_cpu_base = {
	.seq = SEQCNT_ZERO(migration_cpu_base),
	.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};

#define migration_base	migration_cpu_base.clock_base[0]

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = &migration_base and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires <= new_base->cpu_base->expires_next;
#else
	return 0;
#endif
}

#ifdef CONFIG_NO_HZ_COMMON
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
	if (pinned || !base->migration_enabled)
		return base;
	return &per_cpu(hrtimer_bases, get_nohz_timer_target());
}
#else
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
	return base;
}
#endif

/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *	- NO_HZ_COMMON is enabled
 *	- timer migration is enabled
 *	- the timer callback is not running
 *	- the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = &migration_base;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
	s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	tmp >>= sft;
	do_div(tmp, (unsigned long) div);
	return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
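/*
 * Cache the first expiring timer on this CPU. The cached pointer is
 * only maintained in high resolution mode, where __remove_hrtimer()
 * uses it to decide whether the clock event device must be
 * reprogrammed when the first expiring timer is removed.
 */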
static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
					     struct hrtimer *timer)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	cpu_base->next_timer = timer;
#endif
}

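/*
 * Scan the active clock bases and return the earliest expiry time,
 * adjusted by each base's offset into monotonic time. Updates the
 * cached next expiring timer as a side effect.
 */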
static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
{
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	unsigned int active = cpu_base->active_bases;
	ktime_t expires, expires_next = KTIME_MAX;

	hrtimer_update_next_timer(cpu_base, NULL);
	for (; active; base++, active >>= 1) {
		struct timerqueue_node *next;
		struct hrtimer *timer;

		if (!(active & 0x01))
			continue;

		next = timerqueue_getnext(&base->active);
		timer = container_of(next, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires < expires_next) {
			expires_next = expires;
			hrtimer_update_next_timer(cpu_base, timer);
		}
	}
	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
	if (expires_next < 0)
		expires_next = 0;
	return expires_next;
}
#endif

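/*
 * Refresh the realtime, boottime and TAI offsets of this cpu_base from
 * the timekeeping core and return the current monotonic time.
 */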
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	return ktime_get_update_offsets_now(&base->clock_was_set_seq,
					    offs_real, offs_boot, offs_tai);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static bool hrtimer_hres_enabled __read_mostly = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
	return cpu_base->hres_active;
}

static inline int hrtimer_hres_active(void)
{
	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	ktime_t expires_next;

	if (!cpu_base->hres_active)
		return;

	expires_next = __hrtimer_get_next_event(cpu_base);

	if (skip_equal && expires_next == cpu_base->expires_next)
		return;

	cpu_base->expires_next = expires_next;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (cpu_base->hang_detected)
		return;

	tick_program_event(cpu_base->expires_next, 1);
}

/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static void hrtimer_reprogram(struct hrtimer *timer,
			      struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other CPU's clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will
	 * reevaluate the clock bases and reprogram the clock event
	 * device. The callbacks are always executed in hard interrupt
	 * context so we don't need an extra check for a running
	 * callback.
	 */
	if (cpu_base->in_hrtirq)
		return;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires < 0)
		expires = 0;

	if (expires >= cpu_base->expires_next)
		return;

	/* Update the pointer to the next expiring timer */
	cpu_base->next_timer = timer;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return;

	/*
	 * Program the timer hardware. We enforce the expiry for
	 * events which are already in the past.
	 */
	cpu_base->expires_next = expires;
	tick_program_event(expires, 1);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (!base->hres_active)
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static void hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (tick_init_highres()) {
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", base->cpu);
		return;
	}
	base->hres_active = 1;
	hrtimer_resolution = HIGH_RES_NSEC;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
}

static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}

#else

static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_reprogram(struct hrtimer *timer,
				    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs. However, all other CPUs will be
 * stopped with interrupts disabled so the clock_was_set() call
 * must be deferred.
 */
void hrtimers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");

	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
	/* And schedule a retrigger for all others */
	clock_was_set_delayed();
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;

	if (interval < hrtimer_resolution)
		interval = hrtimer_resolution;

	if (unlikely(delta >= interval)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	debug_activate(timer);

	base->cpu_base->active_bases |= 1 << base->index;

	timer->state = HRTIMER_STATE_ENQUEUED;

	return timerqueue_add(&base->active, &timer->node);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     u8 newstate, int reprogram)
{
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	u8 state = timer->state;

	timer->state = newstate;
	if (!(state & HRTIMER_STATE_ENQUEUED))
		return;

	if (!timerqueue_del(&base->active, &timer->node))
		cpu_base->active_bases &= ~(1 << base->index);

#ifdef CONFIG_HIGH_RES_TIMERS
	/*
	 * Note: If reprogram is false we do not update
	 * cpu_base->next_timer. This happens when we remove the first
	 * timer on a remote cpu. No harm as we never dereference
	 * cpu_base->next_timer. So the worst that can happen is a
	 * superfluous call to hrtimer_force_reprogram() on the
	 * remote cpu later on if the same timer gets enqueued again.
	 */
	if (reprogram && timer == cpu_base->next_timer)
		hrtimer_force_reprogram(cpu_base, 1);
#endif
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
	if (hrtimer_is_queued(timer)) {
		u8 state = timer->state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

		if (!restart)
			state = HRTIMER_STATE_INACTIVE;

		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

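/*
 * Adjust a relative timer's expiry for the CONFIG_TIME_LOW_RES case
 * and record whether the timer was started in relative mode.
 */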
static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
					    const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffy) to prevent short timeouts.
	 */
	timer->is_rel = mode & HRTIMER_MODE_REL;
	if (timer->is_rel)
		tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
	return tim;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			    u64 delta_ns, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	remove_hrtimer(timer, base, true);

	if (mode & HRTIMER_MODE_REL)
		tim = ktime_add_safe(tim, base->get_time());

	tim = hrtimer_update_lowres(timer, tim, mode);

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	leftmost = enqueue_hrtimer(timer, new_base);
	if (!leftmost)
		goto unlock;

	if (!hrtimer_is_hres_active(timer)) {
		/*
		 * Kick to reschedule the next tick to handle the new timer
		 * on dynticks target.
		 */
		if (new_base->cpu_base->nohz_active)
			wake_up_nohz_cpu(new_base->cpu_base->cpu);
	} else {
		hrtimer_reprogram(timer, new_base);
	}
unlock:
	unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	/*
	 * Check lockless first. If the timer is not active (neither
	 * enqueued nor running the callback), nothing to do here. The
	 * base lock does not serialize against a concurrent enqueue,
	 * so we can avoid taking it.
	 */
	if (!hrtimer_active(timer))
		return 0;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base, false);

	unlock_hrtimer_base(timer, &flags);

	return ret;

}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
		rem = hrtimer_expires_remaining_adjusted(timer);
	else
		rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!__hrtimer_hres_active(cpu_base))
		expires = __hrtimer_get_next_event(cpu_base);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
#endif

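/*
 * Map a clockid to its hrtimer base index. Invalid clockids trigger a
 * warning and fall back to the monotonic base.
 */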
static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	if (likely(clock_id < MAX_CLOCKS)) {
		int base = hrtimer_clock_to_base_table[clock_id];

		if (likely(base != HRTIMER_MAX_CLOCK_BASES))
			return base;
	}
	WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
	return HRTIMER_BASE_MONOTONIC;
}

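/*
 * Common initialization: clear the timer, select the clock base and
 * set up the timerqueue node. Relative CLOCK_REALTIME timers are
 * silently converted to CLOCK_MONOTONIC, because the wall clock can
 * jump.
 */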
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_cpu_base *cpu_base;
	unsigned int seq;

	do {
		cpu_base = READ_ONCE(timer->base->cpu_base);
		seq = raw_read_seqcount_begin(&cpu_base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    cpu_base->running == timer)
			return true;

	} while (read_seqcount_retry(&cpu_base->seq, seq) ||
		 cpu_base != READ_ONCE(timer->base->cpu_base));

	return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);

/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:	the timer is queued
 *  - callback:	the timer is being run
 *  - post:	the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section, if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */

static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
			  struct hrtimer_clock_base *base,
			  struct hrtimer *timer, ktime_t *now)
{
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	lockdep_assert_held(&cpu_base->lock);

	debug_deactivate(timer);
	cpu_base->running = timer;

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe cpu_base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&cpu_base->seq);

	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
	fn = timer->function;

	/*
	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
	 * timer is restarted with a period then it becomes an absolute
	 * timer. If it's not restarted it does not matter.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
		timer->is_rel = false;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the running state after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * for us already.
	 */
	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base);

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe cpu_base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&cpu_base->seq);

	WARN_ON_ONCE(cpu_base->running != timer);
	cpu_base->running = NULL;
}

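/*
 * Expire all timers on the active clock bases whose soft expiry time
 * is before 'now'. Called with cpu_base->lock held and interrupts
 * disabled.
 */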
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	unsigned int active = cpu_base->active_bases;

	for (; active; base++, active >>= 1) {
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(active & 0x01))
			continue;

		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow < hrtimer_get_softexpires_tv64(timer))
				break;

			__run_hrtimer(cpu_base, base, timer, &basenow);
		}
	}
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next = KTIME_MAX;

	__hrtimer_run_queues(cpu_base, now);

	/* Reevaluate the clock bases for the next expiry */
	expires_next = __hrtimer_get_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}

/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static inline void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = this_cpu_ptr(&tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from run_local_timers in hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t now;

	if (__hrtimer_hres_active(cpu_base))
		return;

	/*
	 * This _is_ ugly: We have to check periodically, whether we
	 * can switch to highres and / or nohz mode. The clocksource
	 * switch happens with xtime_lock held. Notification from
	 * there only sets the check bit in the tick_oneshot code,
	 * otherwise we might deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
	}

	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	__hrtimer_run_queues(cpu_base, now);
	raw_spin_unlock(&cpu_base->lock);
}

/*
 * Sleep related functions:
 */
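/*
 * Timer callback for hrtimer_sleeper: wake up the associated task and
 * clear sleeper->task so do_nanosleep() can detect the expiry.
 */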
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);

		if (likely(t->task))
			freezable_schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

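/*
 * Illustrative sketch, not part of the original file: using an
 * on-stack hrtimer_sleeper directly for a single relative sleep,
 * mirroring one pass of the do_nanosleep() loop above. The function
 * name and the @delay parameter are hypothetical.
 */
static int __sched example_sleeper_wait(ktime_t delay)
{
	struct hrtimer_sleeper t;

	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_set_expires(&t.timer, delay);
	/* arm the wakeup callback for the current task */
	hrtimer_init_sleeper(&t, current);

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);
	__set_current_state(TASK_RUNNING);

	/* t.task was cleared by hrtimer_wakeup() iff the timer fired */
	return t.task ? -EINTR : 0;
}
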
static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

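/*
 * Illustrative userspace sketch, not part of the original file: the
 * timespec written by update_rmtp() is what an interrupted nanosleep()
 * reports through its second argument, so a caller can resume the
 * sleep with the remainder:
 *
 *	struct timespec req = { .tv_sec = 1, .tv_nsec = 0 }, rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;
 */
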
long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
				HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	u64 slack;

	slack = current->timer_slack_ns;
	if (dl_task(current) || rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

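/*
 * Illustrative userspace sketch, not part of the original file: the
 * absolute-mode short cut above is why TIMER_ABSTIME sleeps need no
 * remaining-time bookkeeping - the target time does not move, so an
 * interrupted sleep is simply retried with the same deadline:
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 5;
 *	while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *			       &deadline, NULL) == EINTR)
 *		;
 */
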
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
int hrtimers_prepare_cpu(unsigned int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	cpu_base->cpu = cpu;
	hrtimer_init_hres(cpu_base);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as ENQUEUED not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);
	}
}

int hrtimers_dead_cpu(unsigned int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = this_cpu_ptr(&hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init hrtimers_init(void)
{
	hrtimers_prepare_cpu(smp_processor_id());
}

/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && *expires == 0) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
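
/*
 * Illustrative sketch, not part of the original file: typical use of
 * schedule_hrtimeout_range() - set the task state first, then sleep
 * with some wakeup slack. The function name and the constants are
 * hypothetical.
 */
static int __sched example_slack_sleep(void)
{
	ktime_t to = ktime_set(0, 500 * NSEC_PER_USEC);	/* 500us */

	set_current_state(TASK_INTERRUPTIBLE);
	/* let the wakeup be coalesced within 10us past the timeout */
	return schedule_hrtimeout_range(&to, 10 * NSEC_PER_USEC,
					HRTIMER_MODE_REL);
}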

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
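
/*
 * Illustrative sketch, not part of the original file: a NULL expiry
 * pointer turns schedule_hrtimeout() into an indefinite interruptible
 * sleep, ended only by a signal or an explicit wake_up_process().
 * The function name is hypothetical.
 */
static int __sched example_wait_for_wakeup(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* no timeout: always returns -EINTR once we are woken */
	return schedule_hrtimeout(NULL, HRTIMER_MODE_REL);
}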