// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
static cpumask_var_t tmpmask __cpumask_var_read_mostly;
static int tick_broadcast_forced;

static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
static void tick_broadcast_oneshot_offline(unsigned int cpu);
#else
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}
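
/*
 * Update the frequency of the broadcast device under tick_broadcast_lock.
 * Returns -ENODEV when @dev is not the installed broadcast device.
 */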
int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}
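
/*
 * Last resort broadcast function, used when no real broadcast function
 * is available: complain once instead of silently losing the tick.
 */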
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
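
/*
 * Install the broadcast function of a tick device, falling back to
 * err_broadcast() when the architecture provides none.
 */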
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device, if
			 * the broadcast device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
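/*
 * Called from the architecture's broadcast IPI handler on the target
 * CPU to invoke the event handler of the local tick device.
 */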
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *           expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);

	/* Handle spurious interrupts gracefully */
	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
		raw_spin_unlock(&tick_broadcast_lock);
		return;
	}

	bc_local = tick_do_periodic_broadcast();

	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * We run the handler of the local cpu after dropping
	 * tick_broadcast_lock because the handler might deadlock when
	 * trying to switch to oneshot mode.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Protects also the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
		/* fall through */
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shutdown the cpu local device, if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hiccup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);
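
/*
 * Note: callers normally do not invoke tick_broadcast_control()
 * directly; the tick_broadcast_enable()/tick_broadcast_disable()
 * helpers in <linux/tick.h> wrap it with TICK_BROADCAST_ON/OFF.
 */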

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
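/*
 * Shut down the periodic broadcast device when the last CPU left the
 * broadcast mask.
 */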
static void tick_shutdown_broadcast(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}
}

/*
 * Remove a CPU from broadcasting
 */
void tick_broadcast_offline(unsigned int cpu)
{
	raw_spin_lock(&tick_broadcast_lock);
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);
	tick_broadcast_oneshot_offline(cpu);
	tick_shutdown_broadcast();
	raw_spin_unlock(&tick_broadcast_lock);
}

#endif
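
/*
 * Called from the core suspend path to keep the broadcast device quiet
 * while the system is suspended.
 */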
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}
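
/*
 * Counterpart of tick_suspend_broadcast(): resume the broadcast device
 * and restart it in the current broadcast mode.
 */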
void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}
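
/*
 * Program the broadcast device to fire at @expires on behalf of @cpu
 * and, if the device supports it, steer the broadcast interrupt there.
 */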
static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_switch_state(td->evtdev,
						 CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		/*
		 * Required for !SMP because for_each_cpu() reports
		 * unconditionally CPU0 as set on UP kernels.
		 */
		if (!IS_ENABLED(CONFIG_SMP) &&
		    cpumask_empty(tick_broadcast_oneshot_mask))
			break;

		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 *   events. This happens in dyntick mode, as the maximum PIT
	 *   delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 *   in the event mask
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}
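
/*
 * Check whether the hrtimer based broadcast device is bound to this CPU
 * with an armed event. If so the CPU must not enter deep idle: return
 * -EBUSY, otherwise 0.
 */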
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event < bc->next_event)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
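
/*
 * Called with interrupts disabled via tick_broadcast_oneshot_control()
 * to hand the CPU local tick over to the broadcast device on idle entry
 * (TICK_BROADCAST_ENTER) and to take it back on exit. A negative return
 * value tells the caller that the CPU must not enter deep idle.
 */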
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * If there is no broadcast device, tell the caller not to go
	 * into deep idle.
	 */
	if (!tick_broadcast_device.evtdev)
		return -EBUSY;

	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we do not add
		 * the CPU to the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			goto out;

		/*
		 * If the broadcast device is in periodic mode, we
		 * return.
		 */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
			/* If it is a hrtimer based broadcast, return busy */
			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
				ret = -EBUSY;
			goto out;
		}

		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

			/* Conditionally shut down the local timer. */
			broadcast_shutdown_local(bc, dev);

			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away; we return
			 * busy, so the CPU does not try to go deep
			 * idle.
			 */
			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
				ret = -EBUSY;
			} else if (dev->next_event < bc->next_event) {
				tick_broadcast_set_event(bc, cpu, dev->next_event);
				/*
				 * In case of hrtimer broadcasts the
				 * programming might have moved the
				 * timer to this cpu. If yes, remove
				 * us from the broadcast mask and
				 * return busy.
				 */
				ret = broadcast_needs_cpu(bc, cpu);
				if (ret) {
					cpumask_clear_cpu(cpu,
						tick_broadcast_oneshot_mask);
				}
			}
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
					tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event <= now) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	if (!bc)
		return;

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else
			bc->next_event = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
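/*
 * Called from the CPU hotplug teardown path to take over a pending
 * hrtimer based broadcast event from the outgoing CPU.
 */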
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dying CPU from broadcasting
 */
static void tick_broadcast_oneshot_offline(unsigned int cpu)
{
	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
}
#endif

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return -EBUSY;

	return 0;
}
#endif
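
/*
 * Allocate the cpumasks used by the broadcast machinery. Called early
 * in boot, before any broadcast device can be installed.
 */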
void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}