clockevents: Cleanup dead cpu explicitly
[linux-2.6-block.git] / kernel / time / tick-broadcast.c

/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tick_broadcast_on;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_forced;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

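/*
 * Illustrative sketch (not part of this file, names and values are made up):
 * a global platform timer typically becomes the broadcast device simply by
 * being registered as a clockevent with a sufficient rating and without the
 * CLOCK_EVT_FEAT_C3STOP/CLOCK_EVT_FEAT_PERCPU/CLOCK_EVT_FEAT_DUMMY features;
 * the generic registration path then ends up in tick_install_broadcast_device().
 *
 *	static struct clock_event_device example_global_timer = {
 *		.name		= "example-global-timer",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 250,
 *		.set_next_event	= example_set_next_event,
 *	};
 *
 *	clockevents_config_and_register(&example_global_timer, 1000000, 1, 0x7fffffff);
 */
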
/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}


static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

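/*
 * Illustrative sketch (not part of this file): a dev->broadcast callback is
 * architecture code and usually just sends the timer IPI to all CPUs in the
 * mask. The helper name below is a hypothetical placeholder.
 *
 *	static void example_timer_broadcast(const struct cpumask *mask)
 *	{
 *		example_arch_send_timer_ipi(mask);
 *	}
 */
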
/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device.
			 */
			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			/* Nothing to do */
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

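/*
 * Illustrative sketch (not part of this file, values are made up): a
 * "placeholder" device as described above is one registered without a usable
 * periodic/oneshot mode, e.g. modeled on the generic dummy timer:
 *
 *	static struct clock_event_device example_dummy_evt = {
 *		.name		= "example-dummy",
 *		.features	= CLOCK_EVT_FEAT_DUMMY,
 *		.rating		= 100,
 *		.cpumask	= cpumask_of(smp_processor_id()),
 *	};
 *
 *	clockevents_register_device(&example_dummy_evt);
 */
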
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

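/*
 * Illustrative sketch (not part of this file): architectures selecting
 * CONFIG_GENERIC_CLOCKEVENTS_BROADCAST are expected to call
 * tick_receive_broadcast() from their timer-IPI handler, roughly:
 *
 *	void example_handle_timer_ipi(void)
 *	{
 *		irq_enter();
 *		tick_receive_broadcast();
 *		irq_exit();
 *	}
 */
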
/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	raw_spin_lock(&tick_broadcast_lock);

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
		goto unlock;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			goto unlock;
		tick_do_periodic_broadcast();
	}
unlock:
	raw_spin_unlock(&tick_broadcast_lock);
}

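/*
 * Note (editorial): the loop above advances the expiry in whole tick_period
 * steps until clockevents_program_event() accepts a time in the future; each
 * failed attempt means that period has already elapsed, so the broadcast
 * handlers are run once more to make up for it.
 */
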
/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode: The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;

	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	if (!tick_device_is_functional(dev))
		return;

	raw_spin_lock(&tick_broadcast_lock);
	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
		/* fall through */
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);

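/*
 * Illustrative sketch (not part of this file): callers normally do not invoke
 * tick_broadcast_control() directly; the tick_broadcast_enable(),
 * tick_broadcast_disable() and tick_broadcast_force() helpers in
 * <linux/tick.h> are assumed to wrap it, roughly:
 *
 *	static inline void tick_broadcast_enable(void)
 *	{
 *		tick_broadcast_control(TICK_BROADCAST_ON);
 *	}
 *
 * A local-timer driver whose device stops in deep C-states would call such a
 * helper when the CPU comes online.
 */
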
/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int cpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

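/*
 * Illustrative sketch (not part of this file): the idle loop is assumed to
 * use this check to poll instead of entering a deep idle state when the
 * broadcast IPI is already on its way, roughly:
 *
 *	if (tick_check_broadcast_expired())
 *		cpu_idle_poll();
 *	else
 *		cpuidle_idle_call();
 */
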
/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->state != CLOCK_EVT_STATE_ONESHOT)
		clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_set_state(td->evtdev,
					      CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wake up the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

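/*
 * Note (editorial): in short, the handler above (1) collects every CPU whose
 * local expiry time has passed plus every CPU in the force mask, (2) wakes
 * them via tick_do_broadcast(), and (3) re-arms the broadcast device for the
 * earliest not-yet-expired per-cpu event, retrying if that event expires
 * while it is being programmed.
 */
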
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event.tv64 == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event.tv64 < bc->next_event.tv64)
			return;
	}
	clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

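/*
 * Note (editorial, hedged): CLOCK_EVT_FEAT_HRTIMER marks the hrtimer-emulated
 * broadcast device (tick-broadcast-hrtimer.c). That "device" is really a
 * hrtimer running on one CPU (bc->bound_on), so that CPU must keep its local
 * timer alive and stay out of deep idle while it acts as the broadcast
 * source; the -EBUSY return from broadcast_needs_cpu() is what enforces this.
 */
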
/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state: The target state (enter/exit)
 *
 * The system enters/leaves a state, where affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return 0;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
			broadcast_shutdown_local(bc, dev);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we remove the
		 * CPU from the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
					       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

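/*
 * Illustrative sketch (not part of this file): the idle path is assumed to
 * reach the function above through the tick_broadcast_enter()/
 * tick_broadcast_exit() helpers in <linux/tick.h>, roughly as a cpuidle
 * driver with a timer-stopping C-state would use them:
 *
 *	if (tick_broadcast_enter())
 *		return -EBUSY;			(this CPU may not go deep idle)
 *	example_enter_deep_idle_state();	(hypothetical placeholder)
 *	tick_broadcast_exit();
 */
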
/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->state == CLOCK_EVT_STATE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
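
/*
 * Note (editorial, hedged): tick_broadcast_init() is expected to run once
 * during early boot from the generic tick initialization, before secondary
 * CPUs and their clockevent devices show up, which is why plain GFP_NOWAIT
 * cpumask allocations are sufficient here.
 */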