// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;

/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff.
 * This variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks
 *    at it will take over and keep the timekeeping alive. The handover
 *    procedure also covers cpu hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
#ifdef CONFIG_NO_HZ_FULL
/*
 * tick_do_timer_boot_cpu indicates the boot CPU temporarily owns
 * tick_do_timer_cpu and it should be taken over by an eligible secondary
 * when one comes online.
 */
static int tick_do_timer_boot_cpu __read_mostly = -1;
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
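	/*
	 * A device which keeps ticking in deep idle states (no C3STOP)
	 * can provide oneshot mode on its own; otherwise the oneshot
	 * capable broadcast device has to back it up.
	 */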
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		raw_spin_lock(&jiffies_lock);
		write_seqcount_begin(&jiffies_seq);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_seqcount_end(&jiffies_seq);
		raw_spin_unlock(&jiffies_lock);
		update_wall_time();
	}

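	/*
	 * Per-CPU housekeeping: account the tick to the current task and
	 * run the local timer machinery on every CPU, not only on the
	 * do_timer() CPU.
	 */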
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
	/*
	 * The cpu might have transitioned to HIGHRES or NOHZ mode via
	 * update_process_times() -> run_local_timers() ->
	 * hrtimer_run_queues().
	 */
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif

	if (!clockevent_state_oneshot(dev))
		return;
	for (;;) {
		/*
		 * Setup the next period for devices, which do not have
		 * periodic mode:
		 */
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}

/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
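		/*
		 * Either the device has no periodic mode or the broadcast
		 * device is in oneshot mode: emulate the periodic tick by
		 * programming oneshot events one tick_period apart.
		 */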
		unsigned int seq;
		ktime_t next;

		do {
			seq = read_seqcount_begin(&jiffies_seq);
			next = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

#ifdef CONFIG_NO_HZ_FULL
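/*
 * Runs via smp_call_function_single() on the CPU which currently owns
 * tick_do_timer_cpu and hands the duty over to the requesting CPU.
 */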
static void giveup_do_timer(void *info)
{
	int cpu = *(unsigned int *)info;

	WARN_ON(tick_do_timer_cpu != smp_processor_id());

	tick_do_timer_cpu = cpu;
}

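/*
 * Pull the do_timer() duty from the boot CPU over to this CPU, the first
 * eligible housekeeping CPU to come online (see tick_setup_device()).
 */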
static void tick_take_do_timer_from_boot(void)
{
	int cpu = smp_processor_id();
	int from = tick_do_timer_boot_cpu;

	if (from >= 0 && from != cpu)
		smp_call_function_single(from, giveup_do_timer, &cpu, 1);
}
#endif

/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	void (*handler)(struct clock_event_device *) = NULL;
	ktime_t next_event = 0;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;

			tick_next_period = ktime_get();
			tick_period = NSEC_PER_SEC / HZ;
#ifdef CONFIG_NO_HZ_FULL
			/*
			 * The boot CPU may be nohz_full, in which case set
			 * tick_do_timer_boot_cpu so the first housekeeping
			 * secondary that comes up will take do_timer from
			 * us.
			 */
			if (tick_nohz_full_cpu(cpu))
				tick_do_timer_boot_cpu = cpu;

		} else if (tick_do_timer_boot_cpu != -1 &&
			   !tick_nohz_full_cpu(cpu)) {
			tick_take_do_timer_from_boot();
			tick_do_timer_boot_cpu = -1;
			WARN_ON(tick_do_timer_cpu != cpu);
#endif
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

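/*
 * Exchange the tick device of the calling CPU with @newdev and set the
 * new device up in the current mode.
 */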
void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}

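/*
 * Check whether @newdev is a suitable per-CPU tick device for @cpu: it
 * must cover the CPU and either be strictly CPU-local or allow its
 * interrupt to be moved there without displacing an existing CPU-local
 * device.
 */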
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing cpu local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}

static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/*
	 * Use the higher rated one, but prefer a CPU local device with a lower
	 * rating than a non-CPU local device
	 */
	return !curdev ||
	       newdev->rating > curdev->rating ||
	       !cpumask_equal(curdev->cpumask, newdev->cpumask);
}

/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}

/*
 * Check whether the newly registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;

	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;

	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the possibly existing device by the new device. If the
	 * current device is the broadcast device, do not give it back to
	 * the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	tick_install_broadcast_device(newdev);
}

/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state:	The target state (enter/exit)
 *
 * The system enters/leaves a state where affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	return __tick_broadcast_oneshot_control(state);
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this cpu, nothing can change it.
 */
void tick_handover_do_timer(void)
{
	if (tick_do_timer_cpu == smp_processor_id()) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
#endif

/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local cpu for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	clockevents_shutdown(td->evtdev);
}

/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
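	/*
	 * If the tick of this CPU is handled by the broadcast device,
	 * nothing has to be reprogrammed here; otherwise restore the
	 * local device in its previous periodic or oneshot mode.
	 */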
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
}

/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend with only one
 * CPU online and interrupts disabled or from tick_freeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}

/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}

#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;

/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping. Otherwise suspend the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus()) {
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), true);
		system_state = SYSTEM_SUSPEND;
		sched_clock_suspend();
		timekeeping_suspend();
	} else {
		tick_suspend_local();
	}

	raw_spin_unlock(&tick_freeze_lock);
}

/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping. Otherwise resume the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_freeze().
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus()) {
		timekeeping_resume();
		sched_clock_resume();
		system_state = SYSTEM_RUNNING;
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), false);
	} else {
		touch_softlockup_watchdog();
		tick_resume_local();
	}

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}
#endif /* CONFIG_SUSPEND */

/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
	tick_broadcast_init();
	tick_nohz_init();
}