// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 * High-resolution kernel timers
 *
 * In contrast to the low-resolution timeout API, aka timer wheel,
 * hrtimers provide finer resolution and accuracy depending on system
 * configuration and capabilities.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Credits:
 *      Based on the original timer wheel code
 *
 *      Help, testing, suggestions, bugfixes, improvements were
 *      provided by:
 *
 *      George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *      et al.
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"

/*
 * Masks for selecting the soft and hard context timers from
 * cpu_base->active
 */
#define MASK_SHIFT              (HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD     ((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT     (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL      (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

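/*
 * Example (assuming enum hrtimer_base_type mirrors the clock_base[]
 * layout below: four hard bases followed by their four soft
 * counterparts, i.e. HRTIMER_BASE_MONOTONIC_SOFT == 4):
 *
 *      MASK_SHIFT          == 4
 *      HRTIMER_ACTIVE_HARD == 0x0f    (bits 0-3 select the hard bases)
 *      HRTIMER_ACTIVE_SOFT == 0xf0    (bits 4-7 select the soft bases)
 *      HRTIMER_ACTIVE_ALL  == 0xff
 */
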
/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
        .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
        .clock_base =
        {
                {
                        .index = HRTIMER_BASE_MONOTONIC,
                        .clockid = CLOCK_MONOTONIC,
                        .get_time = &ktime_get,
                },
                {
                        .index = HRTIMER_BASE_REALTIME,
                        .clockid = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
                },
                {
                        .index = HRTIMER_BASE_BOOTTIME,
                        .clockid = CLOCK_BOOTTIME,
                        .get_time = &ktime_get_boottime,
                },
                {
                        .index = HRTIMER_BASE_TAI,
                        .clockid = CLOCK_TAI,
                        .get_time = &ktime_get_clocktai,
                },
                {
                        .index = HRTIMER_BASE_MONOTONIC_SOFT,
                        .clockid = CLOCK_MONOTONIC,
                        .get_time = &ktime_get,
                },
                {
                        .index = HRTIMER_BASE_REALTIME_SOFT,
                        .clockid = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
                },
                {
                        .index = HRTIMER_BASE_BOOTTIME_SOFT,
                        .clockid = CLOCK_BOOTTIME,
                        .get_time = &ktime_get_boottime,
                },
                {
                        .index = HRTIMER_BASE_TAI_SOFT,
                        .clockid = CLOCK_TAI,
                        .get_time = &ktime_get_clocktai,
                },
        }
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
        /* Make sure we catch unsupported clockids */
        [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,

        [CLOCK_REALTIME]  = HRTIMER_BASE_REALTIME,
        [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
        [CLOCK_BOOTTIME]  = HRTIMER_BASE_BOOTTIME,
        [CLOCK_TAI]       = HRTIMER_BASE_TAI,
};

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 * such that hrtimer_callback_running() can unconditionally dereference
 * timer->base->cpu_base
 */
static struct hrtimer_cpu_base migration_cpu_base = {
        .clock_base = { { .cpu_base = &migration_cpu_base, }, },
};

#define migration_base  migration_cpu_base.clock_base[0]

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = &migration_base and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
                                             unsigned long *flags)
{
        struct hrtimer_clock_base *base;

        for (;;) {
                base = timer->base;
                if (likely(base != &migration_base)) {
                        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU: */
                        raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
                }
                cpu_relax();
        }
}

/*
 * We do not migrate the timer when it is expiring before the next
 * event on the target cpu. When high resolution is enabled, we cannot
 * reprogram the target cpu hardware and we would cause it to fire
 * late. To keep it simple, we handle the high resolution enabled and
 * disabled cases the same way.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
        ktime_t expires;

        expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
        return expires < new_base->cpu_base->expires_next;
}

static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
                                         int pinned)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
        if (static_branch_likely(&timers_migration_enabled) && !pinned)
                return &per_cpu(hrtimer_bases, get_nohz_timer_target());
#endif
        return base;
}

/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *      - NO_HZ_COMMON is enabled
 *      - timer migration is enabled
 *      - the timer callback is not running
 *      - the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
                    int pinned)
{
        struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
        struct hrtimer_clock_base *new_base;
        int basenum = base->index;

        this_cpu_base = this_cpu_ptr(&hrtimer_bases);
        new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
        new_base = &new_cpu_base->clock_base[basenum];

        if (base != new_base) {
                /*
                 * We are trying to move timer to new_base.
                 * However we can't change timer's base while it is running,
                 * so we keep it on the same CPU. No hassle vs. reprogramming
                 * the event source in the high resolution case. The softirq
                 * code will take care of this when the timer function has
                 * completed. There is no conflict as we hold the lock until
                 * the timer is enqueued.
                 */
                if (unlikely(hrtimer_callback_running(timer)))
                        return base;

                /* See the comment in lock_hrtimer_base() */
                timer->base = &migration_base;
                raw_spin_unlock(&base->cpu_base->lock);
                raw_spin_lock(&new_base->cpu_base->lock);

                if (new_cpu_base != this_cpu_base &&
                    hrtimer_check_target(timer, new_base)) {
                        raw_spin_unlock(&new_base->cpu_base->lock);
                        raw_spin_lock(&base->cpu_base->lock);
                        new_cpu_base = this_cpu_base;
                        timer->base = base;
                        goto again;
                }
                timer->base = new_base;
        } else {
                if (new_cpu_base != this_cpu_base &&
                    hrtimer_check_target(timer, new_base)) {
                        new_cpu_base = this_cpu_base;
                        goto again;
                }
        }
        return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        struct hrtimer_clock_base *base = timer->base;

        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

        return base;
}

# define switch_hrtimer_base(t, b, p)   (b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
        int sft = 0;
        s64 dclc;
        u64 tmp;

        dclc = ktime_to_ns(kt);
        tmp = dclc < 0 ? -dclc : dclc;

        /* Make sure the divisor is less than 2^32: */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
        tmp >>= sft;
        do_div(tmp, (unsigned long) div);
        return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG < 64 */

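/*
 * Worked example for the shift loop above: dividing 12 seconds by a
 * 6e9 ns divisor. 6e9 does not fit into 32 bits, so both divisor and
 * dividend are shifted right once before do_div():
 *
 *      div = 6000000000 >> 1  = 3000000000     (now fits in 32 bits)
 *      tmp = 12000000000 >> 1 = 6000000000
 *      do_div(6000000000, 3000000000) == 2     == 12e9 / 6e9
 *
 * For divisors >= 2^32 the shifting discards low order bits, so the
 * result is an approximation, which is accurate enough for the
 * nanosecond intervals the timer code feeds in here.
 */
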
/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
        ktime_t res = ktime_add_unsafe(lhs, rhs);

        /*
         * We use KTIME_SEC_MAX here, the maximum timeout which we can
         * return to user space in a timespec:
         */
        if (res < 0 || res < lhs || res < rhs)
                res = ktime_set(KTIME_SEC_MAX, 0);

        return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

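/*
 * Example: with a plain ktime_add(), adding one second to a value near
 * KTIME_MAX would wrap into a negative number. ktime_add_safe() clamps
 * such sums to ktime_set(KTIME_SEC_MAX, 0) instead, i.e. the largest
 * timeout which can be handed back to user space in a timespec.
 */
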
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
        return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct hrtimer *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                hrtimer_cancel(timer);
                debug_object_init(timer, &hrtimer_debug_descr);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);
                /* fall through */
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct hrtimer *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                hrtimer_cancel(timer);
                debug_object_free(timer, &hrtimer_debug_descr);
                return true;
        default:
                return false;
        }
}

static struct debug_obj_descr hrtimer_debug_descr = {
        .name           = "hrtimer",
        .debug_hint     = hrtimer_debug_hint,
        .fixup_init     = hrtimer_fixup_init,
        .fixup_activate = hrtimer_fixup_activate,
        .fixup_free     = hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
        debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer,
                                          enum hrtimer_mode mode)
{
        debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
        debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
        debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                           enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
                           enum hrtimer_mode mode)
{
        debug_object_init_on_stack(timer, &hrtimer_debug_descr);
        __hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
                                   clockid_t clock_id, enum hrtimer_mode mode);

void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
                                   clockid_t clock_id, enum hrtimer_mode mode)
{
        debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
        __hrtimer_init_sleeper(sl, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
        debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);

#else

static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer,
                                          enum hrtimer_mode mode) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
           enum hrtimer_mode mode)
{
        debug_hrtimer_init(timer);
        trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer,
                                  enum hrtimer_mode mode)
{
        debug_hrtimer_activate(timer, mode);
        trace_hrtimer_start(timer, mode);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
        debug_hrtimer_deactivate(timer);
        trace_hrtimer_cancel(timer);
}

static struct hrtimer_clock_base *
__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
{
        unsigned int idx;

        if (!*active)
                return NULL;

        idx = __ffs(*active);
        *active &= ~(1U << idx);

        return &cpu_base->clock_base[idx];
}

#define for_each_active_base(base, cpu_base, active)    \
        while ((base = __next_base((cpu_base), &(active))))

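/*
 * Example: for active == 0x05, i.e. timers pending on clock_base[0]
 * (MONOTONIC) and clock_base[2] (BOOTTIME), __next_base() consumes the
 * set bits in ascending order: the loop body runs for base index 0,
 * then index 2, after which *active is 0 and the iteration stops.
 */
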
static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
                                         const struct hrtimer *exclude,
                                         unsigned int active,
                                         ktime_t expires_next)
{
        struct hrtimer_clock_base *base;
        ktime_t expires;

        for_each_active_base(base, cpu_base, active) {
                struct timerqueue_node *next;
                struct hrtimer *timer;

                next = timerqueue_getnext(&base->active);
                timer = container_of(next, struct hrtimer, node);
                if (timer == exclude) {
                        /* Get to the next timer in the queue. */
                        next = timerqueue_iterate_next(next);
                        if (!next)
                                continue;

                        timer = container_of(next, struct hrtimer, node);
                }
                expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
                if (expires < expires_next) {
                        expires_next = expires;

                        /* Skip cpu_base update if a timer is being excluded. */
                        if (exclude)
                                continue;

                        if (timer->is_soft)
                                cpu_base->softirq_next_timer = timer;
                        else
                                cpu_base->next_timer = timer;
                }
        }
        /*
         * clock_was_set() might have changed base->offset of any of
         * the clock bases so the result might be negative. Fix it up
         * to prevent a false positive in clockevents_program_event().
         */
        if (expires_next < 0)
                expires_next = 0;
        return expires_next;
}

/*
 * Recomputes cpu_base::*next_timer and returns the earliest expires_next;
 * it does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
 *
 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases;
 * those timers will get run whenever the softirq gets handled. At the end of
 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
 *
 * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
 * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
 * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
 *
 * @active_mask must be one of:
 *  - HRTIMER_ACTIVE_ALL,
 *  - HRTIMER_ACTIVE_SOFT, or
 *  - HRTIMER_ACTIVE_HARD.
 */
static ktime_t
__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
{
        unsigned int active;
        struct hrtimer *next_timer = NULL;
        ktime_t expires_next = KTIME_MAX;

        if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
                active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
                cpu_base->softirq_next_timer = NULL;
                expires_next = __hrtimer_next_event_base(cpu_base, NULL,
                                                         active, KTIME_MAX);

                next_timer = cpu_base->softirq_next_timer;
        }

        if (active_mask & HRTIMER_ACTIVE_HARD) {
                active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
                cpu_base->next_timer = next_timer;
                expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
                                                         expires_next);
        }

        return expires_next;
}

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
        ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
        ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
        ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

        ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
                                                   offs_real, offs_boot, offs_tai);

        base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
        base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
        base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;

        return now;
}

/*
 * Is the high resolution mode active ?
 */
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
        return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
                cpu_base->hres_active : 0;
}

static inline int hrtimer_hres_active(void)
{
        return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
        ktime_t expires_next;

        /*
         * Find the current next expiration time.
         */
        expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);

        if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
                /*
                 * When the softirq is activated, the hrtimer has to be
                 * programmed with the first hard hrtimer because the soft
                 * timer interrupt could occur too late.
                 */
                if (cpu_base->softirq_activated)
                        expires_next = __hrtimer_get_next_event(cpu_base,
                                                                HRTIMER_ACTIVE_HARD);
                else
                        cpu_base->softirq_expires_next = expires_next;
        }

        if (skip_equal && expires_next == cpu_base->expires_next)
                return;

        cpu_base->expires_next = expires_next;

        /*
         * If hres is not active, hardware does not have to be
         * reprogrammed yet.
         *
         * If a hang was detected in the last timer interrupt then we
         * leave the hang delay active in the hardware. We want the
         * system to make progress. That also prevents the following
         * scenario:
         * T1 expires 50ms from now
         * T2 expires 5s from now
         *
         * T1 is removed, so this code is called and would reprogram
         * the hardware to 5s from now. Any hrtimer_start after that
         * will not reprogram the hardware due to hang_detected being
         * set. So we'd effectively block all timers until the T2 event
         * fires.
         */
        if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
                return;

        tick_program_event(cpu_base->expires_next, 1);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static bool hrtimer_hres_enabled __read_mostly = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
        return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
        return hrtimer_hres_enabled;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
        struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

        if (!__hrtimer_hres_active(base))
                return;

        raw_spin_lock(&base->lock);
        hrtimer_update_base(base);
        hrtimer_force_reprogram(base, 0);
        raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static void hrtimer_switch_to_hres(void)
{
        struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

        if (tick_init_highres()) {
                pr_warn("Could not switch to high resolution mode on CPU %u\n",
                        base->cpu);
                return;
        }
        base->hres_active = 1;
        hrtimer_resolution = HIGH_RES_NSEC;

        tick_setup_sched_timer();
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
}

static void clock_was_set_work(struct work_struct *work)
{
        clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
void clock_was_set_delayed(void)
{
        schedule_work(&hrtimer_work);
}

#else

static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
{
        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
        struct hrtimer_clock_base *base = timer->base;
        ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

        WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

        /*
         * A CLOCK_REALTIME timer might be requested with an absolute
         * expiry time which is less than base->offset. Set it to 0.
         */
        if (expires < 0)
                expires = 0;

        if (timer->is_soft) {
                /*
                 * A soft hrtimer could be started on a remote CPU. In this
                 * case softirq_expires_next needs to be updated on the
                 * remote CPU. The soft hrtimer will not expire before the
                 * first hard hrtimer on the remote CPU -
                 * hrtimer_check_target() prevents this case.
                 */
                struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;

                if (timer_cpu_base->softirq_activated)
                        return;

                if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
                        return;

                timer_cpu_base->softirq_next_timer = timer;
                timer_cpu_base->softirq_expires_next = expires;

                if (!ktime_before(expires, timer_cpu_base->expires_next) ||
                    !reprogram)
                        return;
        }

        /*
         * If the timer is not on the current cpu, we cannot reprogram
         * the other cpus clock event device.
         */
        if (base->cpu_base != cpu_base)
                return;

        /*
         * If the hrtimer interrupt is running, then it will
         * reevaluate the clock bases and reprogram the clock event
         * device. The callbacks are always executed in hard interrupt
         * context so we don't need an extra check for a running
         * callback.
         */
        if (cpu_base->in_hrtirq)
                return;

        if (expires >= cpu_base->expires_next)
                return;

        /* Update the pointer to the next expiring timer */
        cpu_base->next_timer = timer;
        cpu_base->expires_next = expires;

        /*
         * If hres is not active, hardware does not have to be
         * programmed yet.
         *
         * If a hang was detected in the last timer interrupt then we
         * do not schedule a timer which is earlier than the expiry
         * which we enforced in the hang detection. We want the system
         * to make progress.
         */
        if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
                return;

        /*
         * Program the timer hardware. We enforce the expiry for
         * events which are already in the past.
         */
        tick_program_event(expires, 1);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
        /* Retrigger the CPU local events everywhere */
        on_each_cpu(retrigger_next_event, NULL, 1);
#endif
        timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs. However, all other CPUs will be
 * stopped with interrupts disabled so the clock_was_set() call
 * must be deferred.
 */
void hrtimers_resume(void)
{
        lockdep_assert_irqs_disabled();
        /* Retrigger on the local CPU */
        retrigger_next_event(NULL);
        /* And schedule a retrigger for all others */
        clock_was_set_delayed();
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:      hrtimer to forward
 * @now:        forward past this time
 * @interval:   the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
        u64 orun = 1;
        ktime_t delta;

        delta = ktime_sub(now, hrtimer_get_expires(timer));

        if (delta < 0)
                return 0;

        if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
                return 0;

        if (interval < hrtimer_resolution)
                interval = hrtimer_resolution;

        if (unlikely(delta >= interval)) {
                s64 incr = ktime_to_ns(interval);

                orun = ktime_divns(delta, incr);
                hrtimer_add_expires_ns(timer, incr * orun);
                if (hrtimer_get_expires_tv64(timer) > now)
                        return orun;
                /*
                 * This (and the ktime_add() below) is the
                 * correction for exact:
                 */
                orun++;
        }
        hrtimer_add_expires(timer, interval);

        return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
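
/*
 * Sketch of a typical hrtimer_forward() caller: a periodic callback
 * which pushes its own expiry forward and requests a restart. The
 * callback name, work function and period are made up for illustration:
 *
 *      static enum hrtimer_restart sample_cb(struct hrtimer *timer)
 *      {
 *              do_periodic_work();
 *              hrtimer_forward(timer, timer->base->get_time(),
 *                              ms_to_ktime(10));
 *              return HRTIMER_RESTART;
 *      }
 *
 * hrtimer_forward_now() in <linux/hrtimer.h> wraps exactly this
 * base->get_time() pattern.
 */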

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
                           struct hrtimer_clock_base *base,
                           enum hrtimer_mode mode)
{
        debug_activate(timer, mode);

        base->cpu_base->active_bases |= 1 << base->index;

        timer->state = HRTIMER_STATE_ENQUEUED;

        return timerqueue_add(&base->active, &timer->node);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             u8 newstate, int reprogram)
{
        struct hrtimer_cpu_base *cpu_base = base->cpu_base;
        u8 state = timer->state;

        timer->state = newstate;
        if (!(state & HRTIMER_STATE_ENQUEUED))
                return;

        if (!timerqueue_del(&base->active, &timer->node))
                cpu_base->active_bases &= ~(1 << base->index);

        /*
         * Note: If reprogram is false we do not update
         * cpu_base->next_timer. This happens when we remove the first
         * timer on a remote cpu. No harm as we never dereference
         * cpu_base->next_timer. So the worst that can happen is
         * a superfluous call to hrtimer_force_reprogram() on the
         * remote cpu later on if the same timer gets enqueued again.
         */
        if (reprogram && timer == cpu_base->next_timer)
                hrtimer_force_reprogram(cpu_base, 1);
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
        if (hrtimer_is_queued(timer)) {
                u8 state = timer->state;
                int reprogram;

                /*
                 * Remove the timer and force reprogramming when high
                 * resolution mode is active and the timer is on the current
                 * CPU. If we remove a timer on another CPU, reprogramming is
                 * skipped. The interrupt event on this CPU is fired and
                 * reprogramming happens in the interrupt handler. This is a
                 * rare case and less expensive than a smp call.
                 */
                debug_deactivate(timer);
                reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

                if (!restart)
                        state = HRTIMER_STATE_INACTIVE;

                __remove_hrtimer(timer, base, state, reprogram);
                return 1;
        }
        return 0;
}

static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
                                            const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
        /*
         * CONFIG_TIME_LOW_RES indicates that the system has no way to return
         * granular time values. For relative timers we add hrtimer_resolution
         * (i.e. one jiffy) to prevent short timeouts.
         */
        timer->is_rel = mode & HRTIMER_MODE_REL;
        if (timer->is_rel)
                tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
        return tim;
}

static void
hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
{
        ktime_t expires;

        /*
         * Find the next SOFT expiration.
         */
        expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);

        /*
         * Reprogramming needs to be triggered, even if the next soft
         * hrtimer expires at the same time as the next hard
         * hrtimer. cpu_base->softirq_expires_next needs to be updated!
         */
        if (expires == KTIME_MAX)
                return;

        /*
         * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
         * cpu_base->*expires_next is only set by hrtimer_reprogram()
         */
        hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
}

static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                                    u64 delta_ns, const enum hrtimer_mode mode,
                                    struct hrtimer_clock_base *base)
{
        struct hrtimer_clock_base *new_base;

        /* Remove an active timer from the queue: */
        remove_hrtimer(timer, base, true);

        if (mode & HRTIMER_MODE_REL)
                tim = ktime_add_safe(tim, base->get_time());

        tim = hrtimer_update_lowres(timer, tim, mode);

        hrtimer_set_expires_range_ns(timer, tim, delta_ns);

        /* Switch the timer base, if necessary: */
        new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

        return enqueue_hrtimer(timer, new_base, mode);
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:      the timer to be added
 * @tim:        expiry time
 * @delta_ns:   "slack" range for the timer
 * @mode:       timer mode: absolute (HRTIMER_MODE_ABS) or
 *              relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *              softirq based mode is considered for debug purpose only!
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                            u64 delta_ns, const enum hrtimer_mode mode)
{
        struct hrtimer_clock_base *base;
        unsigned long flags;

        /*
         * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
         * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
         * expiry mode because unmarked timers are moved to softirq expiry.
         */
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
        else
                WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);

        base = lock_hrtimer_base(timer, &flags);

        if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
                hrtimer_reprogram(timer, true);

        unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

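/*
 * Example (timer name made up): arm a timer 100ms out with 1ms of
 * slack, allowing the expiry to be coalesced with other events in the
 * [100ms, 101ms] window:
 *
 *      hrtimer_start_range_ns(&my_timer, ms_to_ktime(100),
 *                             NSEC_PER_MSEC, HRTIMER_MODE_REL);
 *
 * hrtimer_start() is the delta_ns == 0 special case of this function.
 */
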
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:      hrtimer to stop
 *
 * Returns:
 *
 *  *  0 when the timer was not active
 *  *  1 when the timer was active
 *  * -1 when the timer is currently executing the callback function and
 *       cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
        struct hrtimer_clock_base *base;
        unsigned long flags;
        int ret = -1;

        /*
         * Check lockless first. If the timer is not active (neither
         * enqueued nor running the callback), nothing to do here. The
         * base lock does not serialize against a concurrent enqueue,
         * so we can avoid taking it.
         */
        if (!hrtimer_active(timer))
                return 0;

        base = lock_hrtimer_base(timer, &flags);

        if (!hrtimer_callback_running(timer))
                ret = remove_hrtimer(timer, base, false);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:      the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
        for (;;) {
                int ret = hrtimer_try_to_cancel(timer);

                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:      the timer to read
 * @adjust:     adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
        unsigned long flags;
        ktime_t rem;

        lock_hrtimer_base(timer, &flags);
        if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
                rem = hrtimer_expires_remaining_adjusted(timer);
        else
                rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);

        return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
        u64 expires = KTIME_MAX;
        unsigned long flags;

        raw_spin_lock_irqsave(&cpu_base->lock, flags);

        if (!__hrtimer_hres_active(cpu_base))
                expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);

        raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

        return expires;
}

/**
 * hrtimer_next_event_without - time until next expiry event w/o one timer
 * @exclude:    timer to exclude
 *
 * Returns the next expiry time over all timers except for the @exclude one or
 * KTIME_MAX if none of them is pending.
 */
u64 hrtimer_next_event_without(const struct hrtimer *exclude)
{
        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
        u64 expires = KTIME_MAX;
        unsigned long flags;

        raw_spin_lock_irqsave(&cpu_base->lock, flags);

        if (__hrtimer_hres_active(cpu_base)) {
                unsigned int active;

                if (!cpu_base->softirq_activated) {
                        active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
                        expires = __hrtimer_next_event_base(cpu_base, exclude,
                                                            active, KTIME_MAX);
                }
                active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
                expires = __hrtimer_next_event_base(cpu_base, exclude, active,
                                                    expires);
        }

        raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

        return expires;
}
#endif

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
        if (likely(clock_id < MAX_CLOCKS)) {
                int base = hrtimer_clock_to_base_table[clock_id];

                if (likely(base != HRTIMER_MAX_CLOCK_BASES))
                        return base;
        }
        WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
        return HRTIMER_BASE_MONOTONIC;
}

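/*
 * Example: hrtimer_clockid_to_base(CLOCK_TAI) resolves to
 * HRTIMER_BASE_TAI via the table above. An id which is in range but
 * not supported here, e.g. CLOCK_PROCESS_CPUTIME_ID, hits the
 * HRTIMER_MAX_CLOCK_BASES sentinel, triggers the WARN() and falls
 * back to HRTIMER_BASE_MONOTONIC.
 */
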
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                           enum hrtimer_mode mode)
{
        bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
        struct hrtimer_cpu_base *cpu_base;
        int base;

        /*
         * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
         * marked for hard interrupt expiry mode are moved into soft
         * interrupt context for latency reasons and because the callbacks
         * can invoke functions which might sleep on RT, e.g. spin_lock().
         */
        if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
                softtimer = true;

        memset(timer, 0, sizeof(struct hrtimer));

        cpu_base = raw_cpu_ptr(&hrtimer_bases);

        /*
         * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
         * clock modifications, so they need to become CLOCK_MONOTONIC to
         * ensure POSIX compliance.
         */
        if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
                clock_id = CLOCK_MONOTONIC;

        base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
        base += hrtimer_clockid_to_base(clock_id);
        timer->is_soft = softtimer;
        timer->is_hard = !softtimer;
        timer->base = &cpu_base->clock_base[base];
        timerqueue_init(&timer->node);
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:      the timer to be initialized
 * @clock_id:   the clock to be used
 * @mode:       The modes which are relevant for initialization:
 *              HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 *              HRTIMER_MODE_REL_SOFT
 *
 *              The PINNED variants of the above can be handed in,
 *              but the PINNED bit is ignored as pinning happens
 *              when the hrtimer is started
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                  enum hrtimer_mode mode)
{
        debug_init(timer, clock_id, mode);
        __hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

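/*
 * Sketch of the usual life cycle of an hrtimer (names made up for
 * illustration, sample_cb as in the hrtimer_forward() example above):
 *
 *      static struct hrtimer my_timer;
 *
 *      hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *      my_timer.function = sample_cb;
 *      hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 *      ...
 *      hrtimer_cancel(&my_timer);
 */
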
887d9dc9 PZ |
1330 | /* |
1331 | * A timer is active, when it is enqueued into the rbtree or the | |
1332 | * callback function is running or it's in the state of being migrated | |
1333 | * to another cpu. | |
c0a31329 | 1334 | * |
887d9dc9 | 1335 | * It is important for this function to not return a false negative. |
c0a31329 | 1336 | */ |
887d9dc9 | 1337 | bool hrtimer_active(const struct hrtimer *timer) |
c0a31329 | 1338 | { |
3f0b9e8e | 1339 | struct hrtimer_clock_base *base; |
887d9dc9 | 1340 | unsigned int seq; |
c0a31329 | 1341 | |
887d9dc9 | 1342 | do { |
3f0b9e8e AMG |
1343 | base = READ_ONCE(timer->base); |
1344 | seq = raw_read_seqcount_begin(&base->seq); | |
c0a31329 | 1345 | |
887d9dc9 | 1346 | if (timer->state != HRTIMER_STATE_INACTIVE || |
3f0b9e8e | 1347 | base->running == timer) |
887d9dc9 PZ |
1348 | return true; |
1349 | ||
3f0b9e8e AMG |
1350 | } while (read_seqcount_retry(&base->seq, seq) || |
1351 | base != READ_ONCE(timer->base)); | |
887d9dc9 PZ |
1352 | |
1353 | return false; | |
c0a31329 | 1354 | } |
887d9dc9 | 1355 | EXPORT_SYMBOL_GPL(hrtimer_active); |
c0a31329 | 1356 | |
887d9dc9 PZ |
1357 | /* |
1358 | * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3 | |
1359 | * distinct sections: | |
1360 | * | |
1361 | * - queued: the timer is queued | |
1362 | * - callback: the timer is being ran | |
1363 | * - post: the timer is inactive or (re)queued | |
1364 | * | |
1365 | * On the read side we ensure we observe timer->state and cpu_base->running | |
1366 | * from the same section, if anything changed while we looked at it, we retry. | |
1367 | * This includes timer->base changing because sequence numbers alone are | |
1368 | * insufficient for that. | |
1369 | * | |
1370 | * The sequence numbers are required because otherwise we could still observe | |
1371 | * a false negative if the read side got smeared over multiple consequtive | |
1372 | * __run_hrtimer() invocations. | |
1373 | */ | |
1374 | ||
21d6d52a TG |
1375 | static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base, |
1376 | struct hrtimer_clock_base *base, | |
dd934aa8 AMG |
1377 | struct hrtimer *timer, ktime_t *now, |
1378 | unsigned long flags) | |
d3d74453 | 1379 | { |
d3d74453 PZ |
1380 | enum hrtimer_restart (*fn)(struct hrtimer *); |
1381 | int restart; | |
1382 | ||
887d9dc9 | 1383 | lockdep_assert_held(&cpu_base->lock); |
ca109491 | 1384 | |
c6a2a177 | 1385 | debug_deactivate(timer); |
3f0b9e8e | 1386 | base->running = timer; |
887d9dc9 PZ |
1387 | |
1388 | /* | |
1389 | * Separate the ->running assignment from the ->state assignment. | |
1390 | * | |
1391 | * As with a regular write barrier, this ensures the read side in | |
3f0b9e8e | 1392 | * hrtimer_active() cannot observe base->running == NULL && |
887d9dc9 PZ |
1393 | * timer->state == INACTIVE. |
1394 | */ | |
3f0b9e8e | 1395 | raw_write_seqcount_barrier(&base->seq); |
887d9dc9 PZ |
1396 | |
1397 | __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); | |
d3d74453 | 1398 | fn = timer->function; |
ca109491 | 1399 | |
203cbf77 TG |
1400 | /* |
1401 | * Clear the 'is relative' flag for the TIME_LOW_RES case. If the | |
1402 | * timer is restarted with a period then it becomes an absolute | |
1403 | * timer. If its not restarted it does not matter. | |
1404 | */ | |
1405 | if (IS_ENABLED(CONFIG_TIME_LOW_RES)) | |
1406 | timer->is_rel = false; | |
1407 | ||
ca109491 | 1408 | /* |
d05ca13b TG |
1409 | * The timer is marked as running in the CPU base, so it is |
1410 | * protected against migration to a different CPU even if the lock | |
1411 | * is dropped. | |
ca109491 | 1412 | */ |
dd934aa8 | 1413 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); |
c6a2a177 | 1414 | trace_hrtimer_expire_entry(timer, now); |
ca109491 | 1415 | restart = fn(timer); |
c6a2a177 | 1416 | trace_hrtimer_expire_exit(timer); |
dd934aa8 | 1417 | raw_spin_lock_irq(&cpu_base->lock); |
d3d74453 PZ |
1418 | |
1419 | /* | |
887d9dc9 | 1420 | * Note: We clear the running state after enqueue_hrtimer and |
b4d90e9f | 1421 | * we do not reprogram the event hardware. Reprogramming happens either in |
e3f1d883 | 1422 | * hrtimer_start_range_ns() or in hrtimer_interrupt() |
5de2755c PZ |
1423 | * |
1424 | * Note: Because we dropped the cpu_base->lock above, | |
1425 | * hrtimer_start_range_ns() can have popped in and enqueued the timer | |
1426 | * for us already. | |
d3d74453 | 1427 | */ |
5de2755c PZ |
1428 | if (restart != HRTIMER_NORESTART && |
1429 | !(timer->state & HRTIMER_STATE_ENQUEUED)) | |
63e2ed36 | 1430 | enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS); |
f13d4f97 | 1431 | |
887d9dc9 PZ |
1432 | /* |
1433 | * Separate the ->running assignment from the ->state assignment. | |
1434 | * | |
1435 | * As with a regular write barrier, this ensures the read side in | |
3f0b9e8e | 1436 | * hrtimer_active() cannot observe base->running == NULL && |
887d9dc9 PZ |
1437 | * timer->state == INACTIVE. |
1438 | */ | |
3f0b9e8e | 1439 | raw_write_seqcount_barrier(&base->seq); |
f13d4f97 | 1440 | |
3f0b9e8e AMG |
1441 | WARN_ON_ONCE(base->running != timer); |
1442 | base->running = NULL; | |
d3d74453 PZ |
1443 | } |
1444 | ||
dd934aa8 | 1445 | static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, |
c458b1d1 | 1446 | unsigned long flags, unsigned int active_mask) |
54cdfdb4 | 1447 | { |
c272ca58 | 1448 | struct hrtimer_clock_base *base; |
c458b1d1 | 1449 | unsigned int active = cpu_base->active_bases & active_mask; |
6ff7041d | 1450 | |
c272ca58 | 1451 | for_each_active_base(base, cpu_base, active) { |
998adc3d | 1452 | struct timerqueue_node *node; |
ab8177bc TG |
1453 | ktime_t basenow; |
1454 | ||
54cdfdb4 TG |
1455 | basenow = ktime_add(now, base->offset); |
1456 | ||
998adc3d | 1457 | while ((node = timerqueue_getnext(&base->active))) { |
54cdfdb4 TG |
1458 | struct hrtimer *timer; |
1459 | ||
998adc3d | 1460 | timer = container_of(node, struct hrtimer, node); |
54cdfdb4 | 1461 | |
654c8e0b AV |
1462 | /* |
1463 | * The immediate goal for using the softexpires is | |
1464 | * minimizing wakeups, not running timers at the | |
1465 | * earliest interrupt after their soft expiration. | |
1466 | * This allows us to avoid using a Priority Search | |
1467 | * Tree, which can answer a stabbing query for |
1468 | * overlapping intervals and instead use the simple | |
1469 | * BST we already have. | |
1470 | * We don't add extra wakeups by delaying timers that | |
1471 | * are to the right of a not-yet-expired timer, because that |
1472 | * timer will have to trigger a wakeup anyway. (A sketch of this softexpires window follows this function.) |
1473 | */ | |
2456e855 | 1474 | if (basenow < hrtimer_get_softexpires_tv64(timer)) |
54cdfdb4 | 1475 | break; |
54cdfdb4 | 1476 | |
dd934aa8 | 1477 | __run_hrtimer(cpu_base, base, timer, &basenow, flags); |
54cdfdb4 | 1478 | } |
54cdfdb4 | 1479 | } |
21d6d52a TG |
1480 | } |
1481 | ||
5da70160 AMG |
1482 | static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) |
1483 | { | |
1484 | struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); | |
1485 | unsigned long flags; | |
1486 | ktime_t now; | |
1487 | ||
1488 | raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
1489 | ||
1490 | now = hrtimer_update_base(cpu_base); | |
1491 | __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT); | |
1492 | ||
1493 | cpu_base->softirq_activated = 0; | |
1494 | hrtimer_update_softirq_timer(cpu_base, true); | |
1495 | ||
1496 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); | |
1497 | } | |
1498 | ||
21d6d52a TG |
1499 | #ifdef CONFIG_HIGH_RES_TIMERS |
1500 | ||
1501 | /* | |
1502 | * High resolution timer interrupt | |
1503 | * Called with interrupts disabled | |
1504 | */ | |
1505 | void hrtimer_interrupt(struct clock_event_device *dev) | |
1506 | { | |
1507 | struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); | |
1508 | ktime_t expires_next, now, entry_time, delta; | |
dd934aa8 | 1509 | unsigned long flags; |
21d6d52a TG |
1510 | int retries = 0; |
1511 | ||
1512 | BUG_ON(!cpu_base->hres_active); | |
1513 | cpu_base->nr_events++; | |
2456e855 | 1514 | dev->next_event = KTIME_MAX; |
21d6d52a | 1515 | |
dd934aa8 | 1516 | raw_spin_lock_irqsave(&cpu_base->lock, flags); |
21d6d52a TG |
1517 | entry_time = now = hrtimer_update_base(cpu_base); |
1518 | retry: | |
1519 | cpu_base->in_hrtirq = 1; | |
1520 | /* | |
1521 | * We set expires_next to KTIME_MAX here with cpu_base->lock | |
1522 | * held to prevent a timer from being enqueued in our queue via |
1523 | * the migration code. This does not affect enqueueing of | |
1524 | * timers which run their callback and need to be requeued on | |
1525 | * this CPU. | |
1526 | */ | |
2456e855 | 1527 | cpu_base->expires_next = KTIME_MAX; |
21d6d52a | 1528 | |
5da70160 AMG |
1529 | if (!ktime_before(now, cpu_base->softirq_expires_next)) { |
1530 | cpu_base->softirq_expires_next = KTIME_MAX; | |
1531 | cpu_base->softirq_activated = 1; | |
1532 | raise_softirq_irqoff(HRTIMER_SOFTIRQ); | |
1533 | } | |
1534 | ||
c458b1d1 | 1535 | __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); |
21d6d52a | 1536 | |
9bc74919 | 1537 | /* Reevaluate the clock bases for the next expiry */ |
5da70160 | 1538 | expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); |
6ff7041d TG |
1539 | /* |
1540 | * Store the new expiry value so the migration code can verify | |
1541 | * against it. | |
1542 | */ | |
54cdfdb4 | 1543 | cpu_base->expires_next = expires_next; |
9bc74919 | 1544 | cpu_base->in_hrtirq = 0; |
dd934aa8 | 1545 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); |
54cdfdb4 TG |
1546 | |
1547 | /* Reprogramming necessary? */ |
d2540875 | 1548 | if (!tick_program_event(expires_next, 0)) { |
41d2e494 TG |
1549 | cpu_base->hang_detected = 0; |
1550 | return; | |
54cdfdb4 | 1551 | } |
41d2e494 TG |
1552 | |
1553 | /* | |
1554 | * The next timer has already expired due to: |
1555 | * - tracing | |
1556 | * - long lasting callbacks | |
1557 | * - being scheduled away when running in a VM | |
1558 | * | |
1559 | * We need to prevent looping forever in the hrtimer |
1560 | * interrupt routine, so we give it 3 attempts to avoid |
1561 | * overreacting to some spurious event. (A bounded-retry sketch follows this function.) |
5baefd6d JS |
1562 | * |
1563 | * Acquire base lock for updating the offsets and retrieving | |
1564 | * the current time. | |
41d2e494 | 1565 | */ |
dd934aa8 | 1566 | raw_spin_lock_irqsave(&cpu_base->lock, flags); |
5baefd6d | 1567 | now = hrtimer_update_base(cpu_base); |
41d2e494 TG |
1568 | cpu_base->nr_retries++; |
1569 | if (++retries < 3) | |
1570 | goto retry; | |
1571 | /* | |
1572 | * Give the system a chance to do something other than looping |
1573 | * here. We stored the entry time, so we know exactly how long | |
1574 | * we spent here. We schedule the next event this amount of | |
1575 | * time away. | |
1576 | */ | |
1577 | cpu_base->nr_hangs++; | |
1578 | cpu_base->hang_detected = 1; | |
dd934aa8 AMG |
1579 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); |
1580 | ||
41d2e494 | 1581 | delta = ktime_sub(now, entry_time); |
2456e855 TG |
1582 | if ((unsigned int)delta > cpu_base->max_hang_time) |
1583 | cpu_base->max_hang_time = (unsigned int) delta; | |
41d2e494 TG |
1584 | /* |
1585 | * Limit it to a sensible value: back off by the time we |
1586 | * already spent, capped at 100ms, so the CPU can catch up. |
1587 | */ | |
2456e855 | 1588 | if (delta > 100 * NSEC_PER_MSEC) |
41d2e494 TG |
1589 | expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); |
1590 | else | |
1591 | expires_next = ktime_add(now, delta); | |
1592 | tick_program_event(expires_next, 1); | |
7a6e5537 | 1593 | pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta)); |
54cdfdb4 TG |
1594 | } |
1595 | ||
016da201 | 1596 | /* called with interrupts disabled */ |
c6eb3f70 | 1597 | static inline void __hrtimer_peek_ahead_timers(void) |
8bdec955 TG |
1598 | { |
1599 | struct tick_device *td; | |
1600 | ||
1601 | if (!hrtimer_hres_active()) | |
1602 | return; | |
1603 | ||
22127e93 | 1604 | td = this_cpu_ptr(&tick_cpu_device); |
8bdec955 TG |
1605 | if (td && td->evtdev) |
1606 | hrtimer_interrupt(td->evtdev); | |
1607 | } | |
1608 | ||
82c5b7b5 IM |
1609 | #else /* CONFIG_HIGH_RES_TIMERS */ |
1610 | ||
1611 | static inline void __hrtimer_peek_ahead_timers(void) { } | |
1612 | ||
1613 | #endif /* !CONFIG_HIGH_RES_TIMERS */ | |
82f67cd9 | 1614 | |
d3d74453 | 1615 | /* |
c6eb3f70 | 1616 | * Called from run_local_timers in hardirq context every jiffy |
d3d74453 | 1617 | */ |
833883d9 | 1618 | void hrtimer_run_queues(void) |
d3d74453 | 1619 | { |
dc5df73b | 1620 | struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); |
dd934aa8 | 1621 | unsigned long flags; |
21d6d52a | 1622 | ktime_t now; |
c0a31329 | 1623 | |
e19ffe8b | 1624 | if (__hrtimer_hres_active(cpu_base)) |
d3d74453 | 1625 | return; |
54cdfdb4 | 1626 | |
d3d74453 | 1627 | /* |
c6eb3f70 TG |
1628 | * This _is_ ugly: We have to check periodically whether we |
1629 | * can switch to highres and / or nohz mode. The clocksource | |
1630 | * switch happens with xtime_lock held. Notification from | |
1631 | * there only sets the check bit in the tick_oneshot code, | |
1632 | * otherwise we might deadlock vs. xtime_lock. | |
d3d74453 | 1633 | */ |
c6eb3f70 | 1634 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) { |
d3d74453 | 1635 | hrtimer_switch_to_hres(); |
3055adda | 1636 | return; |
833883d9 | 1637 | } |
c6eb3f70 | 1638 | |
dd934aa8 | 1639 | raw_spin_lock_irqsave(&cpu_base->lock, flags); |
21d6d52a | 1640 | now = hrtimer_update_base(cpu_base); |
5da70160 AMG |
1641 | |
1642 | if (!ktime_before(now, cpu_base->softirq_expires_next)) { | |
1643 | cpu_base->softirq_expires_next = KTIME_MAX; | |
1644 | cpu_base->softirq_activated = 1; | |
1645 | raise_softirq_irqoff(HRTIMER_SOFTIRQ); | |
1646 | } | |
1647 | ||
c458b1d1 | 1648 | __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); |
dd934aa8 | 1649 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); |
c0a31329 TG |
1650 | } |
1651 | ||
10c94ec1 TG |
1652 | /* |
1653 | * Sleep related functions: | |
1654 | */ | |
c9cb2e3d | 1655 | static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) |
00362e33 TG |
1656 | { |
1657 | struct hrtimer_sleeper *t = | |
1658 | container_of(timer, struct hrtimer_sleeper, timer); | |
1659 | struct task_struct *task = t->task; | |
1660 | ||
1661 | t->task = NULL; | |
1662 | if (task) | |
1663 | wake_up_process(task); | |
1664 | ||
1665 | return HRTIMER_NORESTART; | |
1666 | } | |
1667 | ||
01656464 TG |
1668 | /** |
1669 | * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer | |
1670 | * @sl: sleeper to be started | |
1671 | * @mode: timer mode abs/rel | |
1672 | * | |
1673 | * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers | |
1674 | * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context) | |
1675 | */ | |
1676 | void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl, | |
1677 | enum hrtimer_mode mode) | |
1678 | { | |
1842f5a4 SAS |
1679 | /* |
1680 | * Make the enqueue delivery mode check work on RT. If the sleeper | |
1681 | * was initialized for hard interrupt delivery, force the mode bit. | |
1682 | * This is a special case for hrtimer_sleepers because | |
1684 | * hrtimer_init_sleeper() determines the delivery mode on RT, which |
1685 | * avoids fiddling with this decision at the call sites. |
1685 | */ | |
1686 | if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard) | |
1687 | mode |= HRTIMER_MODE_HARD; | |
1688 | ||
01656464 TG |
1689 | hrtimer_start_expires(&sl->timer, mode); |
1690 | } | |
1691 | EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires); | |
1692 | ||
dbc1625f SAS |
1693 | static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, |
1694 | clockid_t clock_id, enum hrtimer_mode mode) | |
00362e33 | 1695 | { |
1842f5a4 SAS |
1696 | /* |
1697 | * On PREEMPT_RT enabled kernels, hrtimers which are not explicitly |
1698 | * marked for hard interrupt expiry mode are moved into soft | |
1699 | * interrupt context either for latency reasons or because the | |
1700 | * hrtimer callback takes regular spinlocks or invokes other | |
1701 | * functions which are not suitable for hard interrupt context on | |
1702 | * PREEMPT_RT. | |
1703 | * | |
1704 | * The hrtimer_sleeper callback is RT compatible in hard interrupt | |
1705 | * context, but there is a latency concern: Untrusted userspace can | |
1706 | * spawn many threads which arm timers for the same expiry time on | |
1707 | * the same CPU. That causes a latency spike due to the wakeup of | |
1708 | * a gazillion threads. | |
1709 | * | |
1710 | * OTOH, privileged real-time user space applications rely on the |
1711 | * low latency of hard interrupt wakeups. If the current task is in | |
1712 | * a real-time scheduling class, mark the mode for hard interrupt | |
1713 | * expiry. | |
1714 | */ | |
1715 | if (IS_ENABLED(CONFIG_PREEMPT_RT)) { | |
1716 | if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT)) | |
1717 | mode |= HRTIMER_MODE_HARD; | |
1718 | } | |
1719 | ||
dbc1625f | 1720 | __hrtimer_init(&sl->timer, clock_id, mode); |
00362e33 | 1721 | sl->timer.function = hrtimer_wakeup; |
b7449487 | 1722 | sl->task = current; |
00362e33 | 1723 | } |
dbc1625f SAS |
1724 | |
1725 | /** | |
1726 | * hrtimer_init_sleeper - initialize sleeper to the given clock | |
1727 | * @sl: sleeper to be initialized | |
1728 | * @clock_id: the clock to be used | |
1729 | * @mode: timer mode abs/rel | |
1730 | */ | |
1731 | void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, | |
1732 | enum hrtimer_mode mode) | |
1733 | { | |
1734 | debug_init(&sl->timer, clock_id, mode); | |
1735 | __hrtimer_init_sleeper(sl, clock_id, mode); | |
1736 | ||
1737 | } | |
2bc481cf | 1738 | EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); |
00362e33 | 1739 | |
c0edd7c9 | 1740 | int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) |
ce41aaf4 AV |
1741 | { |
1742 | switch(restart->nanosleep.type) { | |
0fe27955 | 1743 | #ifdef CONFIG_COMPAT_32BIT_TIME |
ce41aaf4 | 1744 | case TT_COMPAT: |
9afc5eee | 1745 | if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp)) |
ce41aaf4 AV |
1746 | return -EFAULT; |
1747 | break; | |
1748 | #endif | |
1749 | case TT_NATIVE: | |
c0edd7c9 | 1750 | if (put_timespec64(ts, restart->nanosleep.rmtp)) |
ce41aaf4 AV |
1751 | return -EFAULT; |
1752 | break; | |
1753 | default: | |
1754 | BUG(); | |
1755 | } | |
1756 | return -ERESTART_RESTARTBLOCK; | |
1757 | } | |
1758 | ||
669d7868 | 1759 | static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) |
432569bb | 1760 | { |
edbeda46 AV |
1761 | struct restart_block *restart; |
1762 | ||
432569bb RZ |
1763 | do { |
1764 | set_current_state(TASK_INTERRUPTIBLE); | |
01656464 | 1765 | hrtimer_sleeper_start_expires(t, mode); |
432569bb | 1766 | |
54cdfdb4 | 1767 | if (likely(t->task)) |
b0f8c44f | 1768 | freezable_schedule(); |
432569bb | 1769 | |
669d7868 | 1770 | hrtimer_cancel(&t->timer); |
c9cb2e3d | 1771 | mode = HRTIMER_MODE_ABS; |
669d7868 TG |
1772 | |
1773 | } while (t->task && !signal_pending(current)); | |
432569bb | 1774 | |
3588a085 PZ |
1775 | __set_current_state(TASK_RUNNING); |
1776 | ||
a7602681 | 1777 | if (!t->task) |
080344b9 | 1778 | return 0; |
080344b9 | 1779 | |
edbeda46 AV |
1780 | restart = ¤t->restart_block; |
1781 | if (restart->nanosleep.type != TT_NONE) { | |
a7602681 | 1782 | ktime_t rem = hrtimer_expires_remaining(&t->timer); |
c0edd7c9 | 1783 | struct timespec64 rmt; |
edbeda46 | 1784 | |
a7602681 AV |
1785 | if (rem <= 0) |
1786 | return 0; | |
c0edd7c9 | 1787 | rmt = ktime_to_timespec64(rem); |
a7602681 | 1788 | |
ce41aaf4 | 1789 | return nanosleep_copyout(restart, &rmt); |
a7602681 AV |
1790 | } |
1791 | return -ERESTART_RESTARTBLOCK; | |
080344b9 ON |
1792 | } |
1793 | ||
fb923c4a | 1794 | static long __sched hrtimer_nanosleep_restart(struct restart_block *restart) |
10c94ec1 | 1795 | { |
669d7868 | 1796 | struct hrtimer_sleeper t; |
a7602681 | 1797 | int ret; |
10c94ec1 | 1798 | |
dbc1625f SAS |
1799 | hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid, |
1800 | HRTIMER_MODE_ABS); | |
cc584b21 | 1801 | hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); |
a7602681 | 1802 | ret = do_nanosleep(&t, HRTIMER_MODE_ABS); |
237fc6e7 TG |
1803 | destroy_hrtimer_on_stack(&t.timer); |
1804 | return ret; | |
10c94ec1 TG |
1805 | } |
1806 | ||
938e7cf2 | 1807 | long hrtimer_nanosleep(const struct timespec64 *rqtp, |
10c94ec1 TG |
1808 | const enum hrtimer_mode mode, const clockid_t clockid) |
1809 | { | |
a7602681 | 1810 | struct restart_block *restart; |
669d7868 | 1811 | struct hrtimer_sleeper t; |
237fc6e7 | 1812 | int ret = 0; |
da8b44d5 | 1813 | u64 slack; |
3bd01206 AV |
1814 | |
1815 | slack = current->timer_slack_ns; | |
aab03e05 | 1816 | if (dl_task(current) || rt_task(current)) |
3bd01206 | 1817 | slack = 0; |
10c94ec1 | 1818 | |
dbc1625f | 1819 | hrtimer_init_sleeper_on_stack(&t, clockid, mode); |
ad196384 | 1820 | hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); |
a7602681 AV |
1821 | ret = do_nanosleep(&t, mode); |
1822 | if (ret != -ERESTART_RESTARTBLOCK) | |
237fc6e7 | 1823 | goto out; |
10c94ec1 | 1824 | |
7978672c | 1825 | /* Absolute timers do not update the rmtp value and restart (see the sketch below): */ |
237fc6e7 TG |
1826 | if (mode == HRTIMER_MODE_ABS) { |
1827 | ret = -ERESTARTNOHAND; | |
1828 | goto out; | |
1829 | } | |
10c94ec1 | 1830 | |
a7602681 | 1831 | restart = ¤t->restart_block; |
1711ef38 | 1832 | restart->fn = hrtimer_nanosleep_restart; |
ab8177bc | 1833 | restart->nanosleep.clockid = t.timer.base->clockid; |
cc584b21 | 1834 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); |
237fc6e7 TG |
1835 | out: |
1836 | destroy_hrtimer_on_stack(&t.timer); | |
1837 | return ret; | |
10c94ec1 TG |
1838 | } |
1839 | ||
01909974 DD |
1840 | #if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT) |
1841 | ||
1842 | SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp, | |
1843 | struct __kernel_timespec __user *, rmtp) | |
6ba1b912 | 1844 | { |
c0edd7c9 | 1845 | struct timespec64 tu; |
6ba1b912 | 1846 | |
c0edd7c9 | 1847 | if (get_timespec64(&tu, rqtp)) |
6ba1b912 TG |
1848 | return -EFAULT; |
1849 | ||
c0edd7c9 | 1850 | if (!timespec64_valid(&tu)) |
6ba1b912 TG |
1851 | return -EINVAL; |
1852 | ||
edbeda46 | 1853 | current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; |
192a82f9 | 1854 | current->restart_block.nanosleep.rmtp = rmtp; |
c0edd7c9 | 1855 | return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC); |
6ba1b912 TG |
1856 | } |
1857 | ||
01909974 DD |
1858 | #endif |
1859 | ||
b5793b0d | 1860 | #ifdef CONFIG_COMPAT_32BIT_TIME |
edbeda46 | 1861 | |
8dabe724 | 1862 | SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp, |
9afc5eee | 1863 | struct old_timespec32 __user *, rmtp) |
edbeda46 | 1864 | { |
c0edd7c9 | 1865 | struct timespec64 tu; |
edbeda46 | 1866 | |
9afc5eee | 1867 | if (get_old_timespec32(&tu, rqtp)) |
edbeda46 AV |
1868 | return -EFAULT; |
1869 | ||
c0edd7c9 | 1870 | if (!timespec64_valid(&tu)) |
edbeda46 AV |
1871 | return -EINVAL; |
1872 | ||
1873 | current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; | |
1874 | current->restart_block.nanosleep.compat_rmtp = rmtp; | |
c0edd7c9 | 1875 | return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC); |
edbeda46 AV |
1876 | } |
1877 | #endif | |
1878 | ||
c0a31329 TG |
1879 | /* |
1880 | * Functions related to boot-time initialization: | |
1881 | */ | |
27590dc1 | 1882 | int hrtimers_prepare_cpu(unsigned int cpu) |
c0a31329 | 1883 | { |
3c8aa39d | 1884 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
c0a31329 TG |
1885 | int i; |
1886 | ||
998adc3d | 1887 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
3c8aa39d | 1888 | cpu_base->clock_base[i].cpu_base = cpu_base; |
998adc3d JS |
1889 | timerqueue_init_head(&cpu_base->clock_base[i].active); |
1890 | } | |
3c8aa39d | 1891 | |
cddd0248 | 1892 | cpu_base->cpu = cpu; |
303c146d | 1893 | cpu_base->active_bases = 0; |
28bfd18b | 1894 | cpu_base->hres_active = 0; |
303c146d TG |
1895 | cpu_base->hang_detected = 0; |
1896 | cpu_base->next_timer = NULL; | |
1897 | cpu_base->softirq_next_timer = NULL; | |
07a9a7ea | 1898 | cpu_base->expires_next = KTIME_MAX; |
5da70160 | 1899 | cpu_base->softirq_expires_next = KTIME_MAX; |
27590dc1 | 1900 | return 0; |
c0a31329 TG |
1901 | } |
1902 | ||
1903 | #ifdef CONFIG_HOTPLUG_CPU | |
1904 | ||
ca109491 | 1905 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, |
37810659 | 1906 | struct hrtimer_clock_base *new_base) |
c0a31329 TG |
1907 | { |
1908 | struct hrtimer *timer; | |
998adc3d | 1909 | struct timerqueue_node *node; |
c0a31329 | 1910 | |
998adc3d JS |
1911 | while ((node = timerqueue_getnext(&old_base->active))) { |
1912 | timer = container_of(node, struct hrtimer, node); | |
54cdfdb4 | 1913 | BUG_ON(hrtimer_callback_running(timer)); |
c6a2a177 | 1914 | debug_deactivate(timer); |
b00c1a99 TG |
1915 | |
1916 | /* | |
c04dca02 | 1917 | * Mark it as ENQUEUED, not INACTIVE, otherwise the |
b00c1a99 TG |
1918 | * timer could be seen as !active and just vanish away |
1919 | * under us on another CPU | |
1920 | */ | |
c04dca02 | 1921 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0); |
c0a31329 | 1922 | timer->base = new_base; |
54cdfdb4 | 1923 | /* |
e3f1d883 TG |
1924 | * Enqueue the timers on the new cpu. This does not |
1925 | * reprogram the event device in case the timer | |
1926 | * expires before the earliest on this CPU, but we run | |
1927 | * hrtimer_interrupt after we migrated everything to | |
1928 | * sort out already expired timers and reprogram the | |
1929 | * event device. | |
54cdfdb4 | 1930 | */ |
63e2ed36 | 1931 | enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS); |
c0a31329 TG |
1932 | } |
1933 | } | |
1934 | ||
27590dc1 | 1935 | int hrtimers_dead_cpu(unsigned int scpu) |
c0a31329 | 1936 | { |
3c8aa39d | 1937 | struct hrtimer_cpu_base *old_base, *new_base; |
731a55ba | 1938 | int i; |
c0a31329 | 1939 | |
37810659 | 1940 | BUG_ON(cpu_online(scpu)); |
37810659 | 1941 | tick_cancel_sched_timer(scpu); |
731a55ba | 1942 | |
5da70160 AMG |
1943 | /* |
1944 | * This BH disable ensures that raise_softirq_irqoff() does |
1945 | * not wake up ksoftirqd (and acquire the pi-lock) while |
1946 | * holding the cpu_base lock | |
1947 | */ | |
1948 | local_bh_disable(); | |
731a55ba TG |
1949 | local_irq_disable(); |
1950 | old_base = &per_cpu(hrtimer_bases, scpu); | |
dc5df73b | 1951 | new_base = this_cpu_ptr(&hrtimer_bases); |
d82f0b0f ON |
1952 | /* |
1953 | * The caller is globally serialized and nobody else | |
1954 | * takes two locks at once, so deadlock is not possible. |
1955 | */ | |
ecb49d1a TG |
1956 | raw_spin_lock(&new_base->lock); |
1957 | raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); | |
c0a31329 | 1958 | |
3c8aa39d | 1959 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
ca109491 | 1960 | migrate_hrtimer_list(&old_base->clock_base[i], |
37810659 | 1961 | &new_base->clock_base[i]); |
c0a31329 TG |
1962 | } |
1963 | ||
5da70160 AMG |
1964 | /* |
1965 | * The migration might have changed the first expiring softirq | |
1966 | * timer on this CPU. Update it. | |
1967 | */ | |
1968 | hrtimer_update_softirq_timer(new_base, false); | |
1969 | ||
ecb49d1a TG |
1970 | raw_spin_unlock(&old_base->lock); |
1971 | raw_spin_unlock(&new_base->lock); | |
37810659 | 1972 | |
731a55ba TG |
1973 | /* Check if we got expired work to do */ |
1974 | __hrtimer_peek_ahead_timers(); | |
1975 | local_irq_enable(); | |
5da70160 | 1976 | local_bh_enable(); |
27590dc1 | 1977 | return 0; |
c0a31329 | 1978 | } |
37810659 | 1979 | |
c0a31329 TG |
1980 | #endif /* CONFIG_HOTPLUG_CPU */ |
1981 | ||
c0a31329 TG |
1982 | void __init hrtimers_init(void) |
1983 | { | |
27590dc1 | 1984 | hrtimers_prepare_cpu(smp_processor_id()); |
5da70160 | 1985 | open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq); |
c0a31329 TG |
1986 | } |
1987 | ||
7bb67439 | 1988 | /** |
351b3f7a | 1989 | * schedule_hrtimeout_range_clock - sleep until timeout |
7bb67439 | 1990 | * @expires: timeout value (ktime_t) |
654c8e0b | 1991 | * @delta: slack in expires timeout (ktime_t) |
90777713 AMG |
1992 | * @mode: timer mode |
1993 | * @clock_id: timer clock to be used | |
7bb67439 | 1994 | */ |
351b3f7a | 1995 | int __sched |
da8b44d5 | 1996 | schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, |
90777713 | 1997 | const enum hrtimer_mode mode, clockid_t clock_id) |
7bb67439 AV |
1998 | { |
1999 | struct hrtimer_sleeper t; | |
2000 | ||
2001 | /* | |
2002 | * Optimize when a zero timeout value is given. It does not | |
2003 | * matter whether this is an absolute or a relative time. | |
2004 | */ | |
2456e855 | 2005 | if (expires && *expires == 0) { |
7bb67439 AV |
2006 | __set_current_state(TASK_RUNNING); |
2007 | return 0; | |
2008 | } | |
2009 | ||
2010 | /* | |
43b21013 | 2011 | * A NULL parameter means "infinite" |
7bb67439 AV |
2012 | */ |
2013 | if (!expires) { | |
2014 | schedule(); | |
7bb67439 AV |
2015 | return -EINTR; |
2016 | } | |
2017 | ||
dbc1625f | 2018 | hrtimer_init_sleeper_on_stack(&t, clock_id, mode); |
654c8e0b | 2019 | hrtimer_set_expires_range_ns(&t.timer, *expires, delta); |
01656464 | 2020 | hrtimer_sleeper_start_expires(&t, mode); |
7bb67439 AV |
2021 | |
2022 | if (likely(t.task)) | |
2023 | schedule(); | |
2024 | ||
2025 | hrtimer_cancel(&t.timer); | |
2026 | destroy_hrtimer_on_stack(&t.timer); | |
2027 | ||
2028 | __set_current_state(TASK_RUNNING); | |
2029 | ||
2030 | return !t.task ? 0 : -EINTR; | |
2031 | } | |
351b3f7a CE |
2032 | |
2033 | /** | |
2034 | * schedule_hrtimeout_range - sleep until timeout | |
2035 | * @expires: timeout value (ktime_t) | |
2036 | * @delta: slack in expires timeout (ktime_t) | |
90777713 | 2037 | * @mode: timer mode |
351b3f7a CE |
2038 | * |
2039 | * Make the current task sleep until the given expiry time has | |
2040 | * elapsed. The routine will return immediately unless | |
2041 | * the current task state has been set (see set_current_state()). | |
2042 | * | |
2043 | * The @delta argument gives the kernel the freedom to schedule the | |
2044 | * actual wakeup to a time that is both power and performance friendly. | |
2045 | * The kernel gives the normal best effort behavior for "@expires+@delta", |
2046 | * and may decide to fire the timer earlier, but no earlier than @expires. |
2047 | * | |
2048 | * You can set the task state as follows - | |
2049 | * | |
2050 | * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to |
4b7e9cf9 DA |
2051 | * pass before the routine returns unless the current task is explicitly |
2052 | * woken up (e.g. by wake_up_process()). |
351b3f7a CE |
2053 | * |
2054 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
4b7e9cf9 DA |
2055 | * delivered to the current task or the current task is explicitly woken |
2056 | * up. | |
351b3f7a CE |
2057 | * |
2058 | * The current task state is guaranteed to be TASK_RUNNING when this | |
2059 | * routine returns. | |
2060 | * | |
4b7e9cf9 DA |
2061 | * Returns 0 when the timer has expired. If the task was woken before the |
2062 | * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or | |
2063 | * by an explicit wakeup, it returns -EINTR. | |
351b3f7a | 2064 | */ |
da8b44d5 | 2065 | int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta, |
351b3f7a CE |
2066 | const enum hrtimer_mode mode) |
2067 | { | |
2068 | return schedule_hrtimeout_range_clock(expires, delta, mode, | |
2069 | CLOCK_MONOTONIC); | |
2070 | } | |
654c8e0b AV |
2071 | EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); |
2072 | ||
2073 | /** | |
2074 | * schedule_hrtimeout - sleep until timeout | |
2075 | * @expires: timeout value (ktime_t) | |
90777713 | 2076 | * @mode: timer mode |
654c8e0b AV |
2077 | * |
2078 | * Make the current task sleep until the given expiry time has | |
2079 | * elapsed. The routine will return immediately unless | |
2080 | * the current task state has been set (see set_current_state()). | |
2081 | * | |
2082 | * You can set the task state as follows - | |
2083 | * | |
2084 | * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to |
4b7e9cf9 DA |
2085 | * pass before the routine returns unless the current task is explicitly |
2086 | * woken up (e.g. by wake_up_process()). |
654c8e0b AV |
2087 | * |
2088 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
4b7e9cf9 DA |
2089 | * delivered to the current task or the current task is explicitly woken |
2090 | * up. | |
654c8e0b AV |
2091 | * |
2092 | * The current task state is guaranteed to be TASK_RUNNING when this | |
2093 | * routine returns. | |
2094 | * | |
4b7e9cf9 DA |
2095 | * Returns 0 when the timer has expired. If the task was woken before the |
2096 | * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or | |
2097 | * by an explicit wakeup, it returns -EINTR. | |
654c8e0b AV |
2098 | */ |
2099 | int __sched schedule_hrtimeout(ktime_t *expires, | |
2100 | const enum hrtimer_mode mode) | |
2101 | { | |
2102 | return schedule_hrtimeout_range(expires, 0, mode); | |
2103 | } | |
7bb67439 | 2104 | EXPORT_SYMBOL_GPL(schedule_hrtimeout); |