/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))

struct tvec {
        struct hlist_head vec[TVN_SIZE];
};

struct tvec_root {
        struct hlist_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        unsigned long next_timer;
        unsigned long active_timers;
        unsigned long all_timers;
        int cpu;
        bool migration_enabled;
        bool nohz_active;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct tvec_base, tvec_bases);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

void timers_update_migration(bool update_nohz)
{
        bool on = sysctl_timer_migration && tick_nohz_active;
        unsigned int cpu;

        /* Avoid the loop, if nothing to update */
        if (this_cpu_read(tvec_bases.migration_enabled) == on)
                return;

        for_each_possible_cpu(cpu) {
                per_cpu(tvec_bases.migration_enabled, cpu) = on;
                per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
                if (!update_nohz)
                        continue;
                per_cpu(tvec_bases.nohz_active, cpu) = true;
                per_cpu(hrtimer_bases.nohz_active, cpu) = true;
        }
}

int timer_migration_handler(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp,
                            loff_t *ppos)
{
        static DEFINE_MUTEX(mutex);
        int ret;

        mutex_lock(&mutex);
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (!ret && write)
                timers_update_migration(false);
        mutex_unlock(&mutex);
        return ret;
}

static inline struct tvec_base *get_target_base(struct tvec_base *base,
                                                int pinned)
{
        if (pinned || !base->migration_enabled)
                return this_cpu_ptr(&tvec_bases);
        return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
}
#else
static inline struct tvec_base *get_target_base(struct tvec_base *base,
                                                int pinned)
{
        return this_cpu_ptr(&tvec_bases);
}
#endif

static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffie is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        /*
         * Make sure j is still in the future. Otherwise return the
         * unmodified value.
         */
        return time_is_after_jiffies(j) ? j : original;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
        return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
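/*
 * Usage sketch (illustrative, not part of the original file; "my_timer"
 * is hypothetical): a housekeeping timer that should fire "roughly
 * every 5 seconds" can batch its wakeup with other second-aligned
 * timers:
 *
 *      mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * For instance, with HZ=1000 and jiffies=12140 on CPU 0 the target is
 * 17140; rem == 140 < HZ/4, so round_jiffies() rounds down and returns
 * 17000, a whole-second boundary.
 */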
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
        timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
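/*
 * Usage sketch (illustrative only; "my_timer" and "my_fn" are
 * hypothetical): a timeout that may fire up to a second late without
 * harm can advertise that tolerance, giving the wheel more freedom to
 * batch expiries:
 *
 *      setup_timer(&my_timer, my_fn, 0);
 *      set_timer_slack(&my_timer, HZ);
 *      mod_timer(&my_timer, jiffies + 10 * HZ);
 */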
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct hlist_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than MAX_TVAL (on 64-bit
                 * architectures or with CONFIG_BASE_SMALL=1) then we
                 * use the maximum timeout.
                 */
                if (idx > MAX_TVAL) {
                        idx = MAX_TVAL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }

        hlist_add_head(&timer->entry, vec);
}
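/*
 * Worked example (illustrative, assuming CONFIG_BASE_SMALL=0, so
 * TVR_BITS=8 and TVN_BITS=6): with base->timer_jiffies == 1000,
 *
 *      expires == 1100  => idx == 100 < TVR_SIZE, so the timer goes
 *                          into tv1.vec[1100 & 255] == tv1.vec[76];
 *      expires == 11000 => idx == 10000 < 1 << 14, so it goes into
 *                          tv2.vec[(11000 >> 8) & 63] == tv2.vec[42].
 *
 * A tv2 slot thus spans 256 jiffies (1 << TVR_BITS), and each further
 * level multiplies the span by 64 (TVN_SIZE), which is why slots must
 * be cascaded down to tv1 before their timers can expire.
 */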
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        /* Advance base->jiffies, if the base is empty */
        if (!base->all_timers++)
                base->timer_jiffies = jiffies;

        __internal_add_timer(base, timer);
        /*
         * Update base->active_timers and base->next_timer
         */
        if (!(timer->flags & TIMER_DEFERRABLE)) {
                if (!base->active_timers++ ||
                    time_before(timer->expires, base->next_timer))
                        base->next_timer = timer->expires;
        }

        /*
         * Check whether the other CPU is in dynticks mode and needs
         * to be triggered to reevaluate the timer wheel.
         * We are protected against the other CPU fiddling
         * with the timer by holding the timer base lock. This also
         * makes sure that a CPU on the way to stop its tick can not
         * evaluate the timer wheel.
         *
         * Spare the IPI for deferrable timers on idle targets though.
         * The next busy ticks will take care of it. Except full dynticks
         * require special care against races with idle_cpu(), lets deal
         * with that later.
         */
        if (base->nohz_active) {
                if (!(timer->flags & TIMER_DEFERRABLE) ||
                    tick_nohz_full_cpu(base->cpu))
                        wake_up_nohz_cpu(base->cpu);
        }
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        void *site;

        /*
         * start_site can be concurrently reset by
         * timer_stats_timer_clear_start_info()
         */
        site = READ_ONCE(timer->start_site);
        if (likely(!site))
                return;

        timer_stats_update_stats(timer, timer->start_pid, site,
                                 timer->function, timer->start_comm,
                                 timer->flags);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
        return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_init(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
        WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The timer was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (timer->entry.pprev == NULL &&
                    timer->entry.next == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_free(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                if (timer->entry.next == TIMER_ENTRY_STATIC) {
                        /*
                         * This is not really a fixup. The timer was
                         * statically initialized. We just make sure that it
                         * is tracked in the object tracker.
                         */
                        debug_object_init(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
        default:
                return 0;
        }
}

static struct debug_obj_descr timer_debug_descr = {
        .name              = "timer_list",
        .debug_hint        = timer_debug_hint,
        .fixup_init        = timer_fixup_init,
        .fixup_activate    = timer_fixup_activate,
        .fixup_free        = timer_fixup_free,
        .fixup_assert_init = timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
        debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
        debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
        debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
        debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
                             const char *name, struct lock_class_key *key)
{
        debug_object_init_on_stack(timer, &timer_debug_descr);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
        debug_timer_init(timer);
        trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
        debug_timer_activate(timer);
        trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
        debug_timer_deactivate(timer);
        trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
        debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key)
{
        timer->entry.pprev = NULL;
        timer->flags = flags | raw_smp_processor_id();
        timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
                    const char *name, struct lock_class_key *key)
{
        debug_init(timer);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
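/*
 * Initialization sketch (illustrative only; "my_timer" and "my_fn" are
 * hypothetical): most users reach init_timer_key() through the
 * setup_timer()/init_timer() wrappers in <linux/timer.h>:
 *
 *      static struct timer_list my_timer;
 *
 *      static void my_fn(unsigned long data)
 *      {
 *              (periodic work goes here)
 *      }
 *
 *      setup_timer(&my_timer, my_fn, 0);         (init + function + data)
 *      mod_timer(&my_timer, jiffies + HZ / 10);  (arm for ~100ms)
 */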
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
        struct hlist_node *entry = &timer->entry;

        debug_deactivate(timer);

        __hlist_del(entry);
        if (clear_pending)
                entry->pprev = NULL;
        entry->next = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
        detach_timer(timer, true);
        if (!(timer->flags & TIMER_DEFERRABLE))
                base->active_timers--;
        base->all_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
                             bool clear_pending)
{
        if (!timer_pending(timer))
                return 0;

        detach_timer(timer, clear_pending);
        if (!(timer->flags & TIMER_DEFERRABLE)) {
                base->active_timers--;
                if (timer->expires == base->next_timer)
                        base->next_timer = base->timer_jiffies;
        }
        /* If this was the last timer, advance base->jiffies */
        if (!--base->all_timers)
                base->timer_jiffies = jiffies;
        return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked and removed from the list, the
 * TIMER_MIGRATING flag is set, FIXME
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                         unsigned long *flags)
        __acquires(timer->base->lock)
{
        for (;;) {
                u32 tf = timer->flags;
                struct tvec_base *base;

                if (!(tf & TIMER_MIGRATING)) {
                        base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
                        spin_lock_irqsave(&base->lock, *flags);
                        if (timer->flags == tf)
                                return base;
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
            bool pending_only, int pinned)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        ret = detach_if_pending(timer, base, false);
        if (!ret && pending_only)
                goto out_unlock;

        debug_activate(timer, expires);

        new_base = get_target_base(base, pinned);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler yet has not finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer->flags |= TIMER_MIGRATING;

                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        WRITE_ONCE(timer->flags,
                                   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);

out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
        return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 * 1) calculate the maximum (absolute) time
 * 2) calculate the highest bit where the expires and new max are different
 * 3) use this bit to make a mask
 * 4) use the bitmask to round down the maximum time, so that all last
 *    bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
        unsigned long expires_limit, mask;
        int bit;

        if (timer->slack >= 0) {
                expires_limit = expires + timer->slack;
        } else {
                long delta = expires - jiffies;

                if (delta < 256)
                        return expires;

                expires_limit = expires + delta / 256;
        }
        mask = expires ^ expires_limit;
        if (mask == 0)
                return expires;

        bit = __fls(mask);

        mask = (1UL << bit) - 1;

        expires_limit = expires_limit & ~(mask);

        return expires_limit;
}
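/*
 * Worked example (illustrative): with slack == -1, jiffies == 1000 and
 * expires == 11000, delta == 10000 and expires_limit == 11000 +
 * 10000/256 == 11039. The highest bit differing between 11000 and
 * 11039 is bit 8, so the low 8 bits of expires_limit are cleared and
 * apply_slack() returns 11008: the timer may fire anywhere in
 * [11000, 11039] and is parked on the 256-jiffy boundary inside that
 * window, where it can share a wheel slot with its neighbours.
 */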
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        expires = apply_slack(timer, expires);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer_pending(timer) && timer->expires == expires)
                return 1;

        return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
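/*
 * Usage sketch (illustrative only; "wd_timer" and WD_TIMEOUT are
 * hypothetical): a watchdog that is "kicked" from several contexts can
 * always use mod_timer(), since it also activates an inactive timer:
 *
 *      mod_timer(&wd_timer, jiffies + WD_TIMEOUT);
 */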
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
        BUG_ON(timer_pending(timer));
        mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
        struct tvec_base *base;
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);

        /*
         * If @timer was on a different CPU, it should be migrated with the
         * old base locked to prevent other operations proceeding with the
         * wrong base locked. See lock_timer_base().
         */
        base = lock_timer_base(timer, &flags);
        if (base != new_base) {
                timer->flags |= TIMER_MIGRATING;

                spin_unlock(&base->lock);
                base = new_base;
                spin_lock(&base->lock);
                WRITE_ONCE(timer->flags,
                           (timer->flags & ~TIMER_BASEMASK) | cpu);
        }

        debug_activate(timer, timer->expires);
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
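/*
 * Usage sketch (illustrative only): pinning a timer to the CPU whose
 * per-CPU data its handler touches:
 *
 *      timer->expires = jiffies + HZ;
 *      add_timer_on(timer, cpu);
 *
 * Unlike add_timer(), the expiry is not pulled to the local base; the
 * timer is queued directly on @cpu's wheel.
 */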
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        debug_assert_init(timer);

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                ret = detach_if_pending(timer, base, true);
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        debug_assert_init(timer);

        base = lock_timer_base(timer, &flags);

        if (base->running_timer != timer) {
                timer_stats_timer_clear_start_info(timer);
                ret = detach_if_pending(timer, base, true);
        }
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 *   interrupt context while calling this function. Even if the lock has
 *   nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
        unsigned long flags;

        /*
         * If lockdep gives a backtrace here, please reference
         * the synchronization rules above.
         */
        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
        local_irq_restore(flags);
#endif
        /*
         * don't use it in hardirq context, because it
         * could lead to deadlock.
         */
        WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL(del_timer_sync);
#endif
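/*
 * Teardown sketch (illustrative only; "priv" is a hypothetical driver
 * private struct): before freeing an object whose timer handler
 * dereferences it, the timer must be synchronously stopped:
 *
 *      del_timer_sync(&priv->poll_timer);   (handler done, not queued)
 *      kfree(priv);                         (now safe to free)
 *
 * Plain del_timer() would leave a window in which the handler is still
 * running on another CPU and touches freed memory.
 */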
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer;
        struct hlist_node *tmp;
        struct hlist_head tv_list;

        hlist_move_list(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                /* No accounting, while moving them */
                __internal_add_timer(base, timer);
        }

        return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
                          unsigned long data)
{
        int count = preempt_count();

#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the timer from inside the
         * function that is called from it, this we need to take into
         * account for lockdep too. To avoid bogus "held lock freed"
         * warnings as well as problems when looking into
         * timer->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map;

        lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
        /*
         * Couple the lock chain with the lock chain at
         * del_timer_sync() by acquiring the lock_map around the fn()
         * call here and in del_timer_sync().
         */
        lock_map_acquire(&lockdep_map);

        trace_timer_expire_entry(timer);
        fn(data);
        trace_timer_expire_exit(timer);

        lock_map_release(&lockdep_map);

        if (count != preempt_count()) {
                WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
                          fn, count, preempt_count());
                /*
                 * Restore the preempt count. That gives us a decent
                 * chance to survive and extract information. If the
                 * callback kept a lock held, bad luck, but not worse
                 * than the BUG() we had.
                 */
                preempt_count_set(count);
        }
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);

        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct hlist_head work_list;
                struct hlist_head *head = &work_list;
                int index;

                if (!base->all_timers) {
                        base->timer_jiffies = jiffies;
                        break;
                }

                index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                hlist_move_list(base->tv1.vec + index, head);
                while (!hlist_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
                        bool irqsafe;

                        timer = hlist_entry(head->first, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;
                        irqsafe = timer->flags & TIMER_IRQSAFE;

                        timer_stats_account_timer(timer);

                        base->running_timer = timer;
                        detach_expired_timer(timer, base);

                        if (irqsafe) {
                                spin_unlock(&base->lock);
                                call_timer_fn(timer, fn, data);
                                spin_lock(&base->lock);
                        } else {
                                spin_unlock_irq(&base->lock);
                                call_timer_fn(timer, fn, data);
                                spin_lock_irq(&base->lock);
                        }
                }
        }
        base->running_timer = NULL;
        spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (nte->flags & TIMER_DEFERRABLE)
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        hlist_for_each_entry(nte, varp->vec + slot, entry) {
                                if (nte->flags & TIMER_DEFERRABLE)
                                        continue;

                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Do we still search for the first timer or are
                         * we looking up the cascade buckets ?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
        u64 nextevt = hrtimer_get_next_event();

        /*
         * If high resolution timers are enabled
         * hrtimer_get_next_event() returns KTIME_MAX.
         */
        if (expires <= nextevt)
                return expires;

        /*
         * If the next timer is already expired, return the tick base
         * time so the tick is fired immediately.
         */
        if (nextevt <= basem)
                return basem;

        /*
         * Round up to the next jiffie. High resolution timers are
         * off, so the hrtimers are expired in the tick and we need to
         * make sure that this tick really expires the timer to avoid
         * a ping pong of the nohz stop code.
         *
         * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
         */
        return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej: base time jiffies
 * @basem: base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
        struct tvec_base *base = this_cpu_ptr(&tvec_bases);
        u64 expires = KTIME_MAX;
        unsigned long nextevt;

        /*
         * Pretend that there is no timer pending if the cpu is offline.
         * Possible pending timers will be migrated later to an active cpu.
         */
        if (cpu_is_offline(smp_processor_id()))
                return expires;

        spin_lock(&base->lock);
        if (base->active_timers) {
                if (time_before_eq(base->next_timer, base->timer_jiffies))
                        base->next_timer = __next_timer_interrupt(base);
                nextevt = base->next_timer;
                if (time_before_eq(nextevt, basej))
                        expires = basem;
                else
                        expires = basem + (nextevt - basej) * TICK_NSEC;
        }
        spin_unlock(&base->lock);

        return cmp_next_hrtimer_event(basem, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
        if (in_irq())
                irq_work_tick();
#endif
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        struct tvec_base *base = this_cpu_ptr(&tvec_bases);

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        hrtimer_run_queues();
        raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
        return alarm_setitimer(seconds);
}

#endif

static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative value
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
        schedule();
        del_singleshot_timer_sync(&timer);

        /* Remove the timer from the object tracker */
        destroy_timer_on_stack(&timer);

        timeout = expire - jiffies;

out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
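/*
 * Usage sketch (illustrative only): wait up to one second for an event,
 * handling signals:
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(HZ);
 *      if (remaining == 0)
 *              (timed out)
 *
 * Note the task state must be set first; a TASK_RUNNING task returns
 * from schedule() almost immediately with most of the timeout left.
 */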
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
        __set_current_state(TASK_KILLABLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not contribute
 * to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
        __set_current_state(TASK_IDLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
{
        struct timer_list *timer;
        int cpu = new_base->cpu;

        while (!hlist_empty(head)) {
                timer = hlist_entry(head->first, struct timer_list, entry);
                /* We ignore the accounting on the dying cpu */
                detach_timer(timer, false);
                timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
                internal_add_timer(new_base, timer);
        }
}

static void migrate_timers(int cpu)
{
        struct tvec_base *old_base;
        struct tvec_base *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu_ptr(&tvec_bases, cpu);
        new_base = get_cpu_ptr(&tvec_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
         */
        spin_lock_irq(&new_base->lock);
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

        BUG_ON(old_base->running_timer);

        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        old_base->active_timers = 0;
        old_base->all_timers = 0;

        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
        put_cpu_ptr(&tvec_bases);
}

static int timer_cpu_notify(struct notifier_block *self,
                            unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                migrate_timers((long)hcpu);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static inline void timer_register_cpu_notifier(void)
{
        cpu_notifier(timer_cpu_notify, 0);
}
#else
static inline void timer_register_cpu_notifier(void) { }
#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
        struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);

        base->cpu = cpu;
        spin_lock_init(&base->lock);

        base->timer_jiffies = jiffies;
        base->next_timer = base->timer_jiffies;
}

static void __init init_timer_cpus(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                init_timer_cpu(cpu);
}

void __init init_timers(void)
{
        init_timer_cpus();
        init_timer_stats();
        timer_register_cpu_notifier();
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

static void __sched do_usleep_range(unsigned long min, unsigned long max)
{
        ktime_t kmin;
        u64 delta;

        kmin = ktime_set(0, min * NSEC_PER_USEC);
        delta = (u64)(max - min) * NSEC_PER_USEC;
        schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
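/*
 * Usage sketch (illustrative only): waiting ~100us for a device
 * register to settle, from non-atomic context:
 *
 *      usleep_range(100, 200);
 *
 * The [min, max] window gives the scheduler slack to coalesce the
 * wakeup with other hrtimer events instead of forcing an extra
 * precisely-timed interrupt, which busy-waiting with udelay() would
 * not allow.
 */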