/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tick_broadcast_on;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_forced;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
        return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
        return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc)
                tick_setup_periodic(bc, 1);
}

/*
 * Check whether the device can be utilized as the broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
                                        struct clock_event_device *newdev)
{
        if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
            (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
            (newdev->features & CLOCK_EVT_FEAT_C3STOP))
                return false;

        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
            !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
                return false;

        return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
        struct clock_event_device *cur = tick_broadcast_device.evtdev;

        if (!tick_check_broadcast_device(cur, dev))
                return;

        if (!try_module_get(dev->owner))
                return;

        clockevents_exchange_device(cur, dev);
        if (cur)
                cur->event_handler = clockevents_handle_noop;
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_broadcast_mask))
                tick_broadcast_start_periodic(dev);
        /*
         * Inform all cpus about this. We might be in a situation
         * where we did not switch to oneshot mode because the per cpu
         * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
         * of a oneshot capable broadcast device. Without that
         * notification the system stays stuck in periodic mode
         * forever.
         */
        if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_clock_notify();
}

/*
 * Check whether the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
        return (dev && tick_broadcast_device.evtdev == dev);
}
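
/*
 * Update the frequency of the broadcast device, serialized against the
 * broadcast event handlers via tick_broadcast_lock. Returns -ENODEV if
 * @dev is not the broadcast device.
 */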
int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
        int ret = -ENODEV;

        if (tick_is_broadcast_device(dev)) {
                raw_spin_lock(&tick_broadcast_lock);
                ret = __clockevents_update_freq(dev, freq);
                raw_spin_unlock(&tick_broadcast_lock);
        }
        return ret;
}
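
/*
 * Fallback broadcast function of last resort: there is nothing to
 * call, so warn once that some CPUs may miss their ticks.
 */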
static void err_broadcast(const struct cpumask *mask)
{
        pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
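
/*
 * Install the broadcast function for a tick device and fall back to
 * err_broadcast() if no broadcast function is available.
 */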
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
        if (!dev->broadcast)
                dev->broadcast = tick_broadcast;
        if (!dev->broadcast) {
                pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
                             dev->name);
                dev->broadcast = err_broadcast;
        }
}

/*
 * Check whether the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Devices might be registered with both periodic and oneshot
         * mode disabled. This signals that the device needs to be
         * operated from the broadcast device and is a placeholder for
         * the cpu local device.
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
                tick_device_setup_broadcast_func(dev);
                cpumask_set_cpu(cpu, tick_broadcast_mask);
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
                ret = 1;
        } else {
                /*
                 * Clear the broadcast bit for this cpu if the
                 * device is not power state affected.
                 */
                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                        cpumask_clear_cpu(cpu, tick_broadcast_mask);
                else
                        tick_device_setup_broadcast_func(dev);

                /*
                 * Clear the broadcast bit if the CPU is not in
                 * periodic broadcast on state.
                 */
                if (!cpumask_test_cpu(cpu, tick_broadcast_on))
                        cpumask_clear_cpu(cpu, tick_broadcast_mask);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_ONESHOT:
                        /*
                         * If the system is in oneshot mode we can
                         * unconditionally clear the oneshot mask bit,
                         * because the CPU is running and therefore
                         * not in an idle state which causes the power
                         * state affected device to stop. Let the
                         * caller initialize the device.
                         */
                        tick_broadcast_clear_oneshot(cpu);
                        ret = 0;
                        break;

                case TICKDEV_MODE_PERIODIC:
                        /*
                         * If the system is in periodic mode, check
                         * whether the broadcast device can be
                         * switched off now.
                         */
                        if (cpumask_empty(tick_broadcast_mask) && bc)
                                clockevents_shutdown(bc);
                        /*
                         * If we kept the cpu in the broadcast mask,
                         * tell the caller to leave the per cpu device
                         * in shutdown state. The periodic interrupt
                         * is delivered by the broadcast device, if
                         * the broadcast device exists and is not
                         * hrtimer based.
                         */
                        if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
                                ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
                        break;
                default:
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
        return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
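/*
 * Called on the target CPU when it receives the broadcast IPI: run the
 * event handler of the CPU local tick device.
 */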
int tick_receive_broadcast(void)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        struct clock_event_device *evt = td->evtdev;

        if (!evt)
                return -ENODEV;

        if (!evt->event_handler)
                return -EINVAL;

        evt->event_handler(evt);
        return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
        int cpu = smp_processor_id();
        struct tick_device *td;
        bool local = false;

        /*
         * Check whether the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                struct clock_event_device *bc = tick_broadcast_device.evtdev;

                cpumask_clear_cpu(cpu, mask);
                /*
                 * We only run the local handler, if the broadcast
                 * device is not hrtimer based. Otherwise we run into
                 * a hrtimer recursion.
                 *
                 * local timer_interrupt()
                 *   local_handler()
                 *     expire_hrtimers()
                 *       bc_handler()
                 *         local_handler()
                 *           expire_hrtimers()
                 */
                local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
        }

        if (!cpumask_empty(mask)) {
                /*
                 * It might be necessary to actually check whether the devices
                 * have different broadcast functions. For now, just use the
                 * one of the first device. This works as long as we have this
                 * misfeature only on x86 (lapic)
                 */
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
        return local;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
        cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
        return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        bool bc_local;

        raw_spin_lock(&tick_broadcast_lock);

        /* Handle spurious interrupts gracefully */
        if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
                raw_spin_unlock(&tick_broadcast_lock);
                return;
        }

        bc_local = tick_do_periodic_broadcast();

        if (clockevent_state_oneshot(dev)) {
                ktime_t next = ktime_add(dev->next_event, tick_period);

                clockevents_program_event(dev, next, true);
        }
        raw_spin_unlock(&tick_broadcast_lock);

        /*
         * We run the handler of the local cpu after dropping
         * tick_broadcast_lock because the handler might deadlock when
         * trying to switch to oneshot mode.
         */
        if (bc_local)
                td->evtdev->event_handler(td->evtdev);
}

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        int cpu, bc_stopped;

        td = this_cpu_ptr(&tick_cpu_device);
        dev = td->evtdev;

        /*
         * Is the device not affected by the powerstate?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
                return;

        if (!tick_device_is_functional(dev))
                return;

        raw_spin_lock(&tick_broadcast_lock);
        cpu = smp_processor_id();
        bc = tick_broadcast_device.evtdev;
        bc_stopped = cpumask_empty(tick_broadcast_mask);

        switch (mode) {
        case TICK_BROADCAST_FORCE:
                tick_broadcast_forced = 1;
                /* fall through */
        case TICK_BROADCAST_ON:
                cpumask_set_cpu(cpu, tick_broadcast_on);
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
                        /*
                         * Only shutdown the cpu local device, if:
                         *
                         * - the broadcast device exists
                         * - the broadcast device is not a hrtimer based one
                         * - the broadcast device is in periodic mode to
                         *   avoid a hiccup during switch to oneshot mode
                         */
                        if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
                            tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                break;

        case TICK_BROADCAST_OFF:
                if (tick_broadcast_forced)
                        break;
                cpumask_clear_cpu(cpu, tick_broadcast_on);
                if (!tick_device_is_functional(dev))
                        break;
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
                break;
        }

        if (bc) {
                if (cpumask_empty(tick_broadcast_mask)) {
                        if (!bc_stopped)
                                clockevents_shutdown(bc);
                } else if (bc_stopped) {
                        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                                tick_broadcast_start_periodic(bc);
                        else
                                tick_broadcast_setup_oneshot(bc);
                }
        }
        raw_spin_unlock(&tick_broadcast_lock);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
        if (!broadcast)
                dev->event_handler = tick_handle_periodic;
        else
                dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int cpu)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        cpumask_clear_cpu(cpu, tick_broadcast_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_on);

        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpumask_empty(tick_broadcast_mask))
                        clockevents_shutdown(bc);
        }

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif
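
/*
 * Shut down the broadcast device on system suspend; it is restarted in
 * tick_resume_broadcast().
 */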
void tick_suspend_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        if (bc)
                clockevents_shutdown(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
                return false;
        else
                return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}
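
/*
 * Resume the broadcast device and restart it in the mode it was in
 * before suspend, if any CPUs still depend on the broadcast.
 */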
void tick_resume_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;

        if (bc) {
                clockevents_tick_resume(bc);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_PERIODIC:
                        if (!cpumask_empty(tick_broadcast_mask))
                                tick_broadcast_start_periodic(bc);
                        break;
                case TICKDEV_MODE_ONESHOT:
                        if (!cpumask_empty(tick_broadcast_mask))
                                tick_resume_broadcast_oneshot(bc);
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
        return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
        return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
                                        const struct cpumask *cpumask)
{
        if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
                return;

        if (cpumask_equal(bc->cpumask, cpumask))
                return;

        bc->cpumask = cpumask;
        irq_set_affinity(bc->irq, bc->cpumask);
}
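
/*
 * Switch the broadcast device to oneshot mode if necessary, program the
 * next expiry and steer the broadcast interrupt towards @cpu.
 */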
static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
                                     ktime_t expires)
{
        if (!clockevent_state_oneshot(bc))
                clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

        clockevents_program_event(bc, expires, 1);
        tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
        clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
        if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
                struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

                /*
                 * We might be in the middle of switching over from
                 * periodic to oneshot. If the CPU has not yet
                 * switched over, leave the device alone.
                 */
                if (td->mode == TICKDEV_MODE_ONESHOT) {
                        clockevents_switch_state(td->evtdev,
                                                 CLOCK_EVT_STATE_ONESHOT);
                }
        }
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu, next_cpu = 0;
        bool bc_local;

        raw_spin_lock(&tick_broadcast_lock);
        dev->next_event.tv64 = KTIME_MAX;
        next_event.tv64 = KTIME_MAX;
        cpumask_clear(tmpmask);
        now = ktime_get();
        /* Find all expired events */
        for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64) {
                        cpumask_set_cpu(cpu, tmpmask);
                        /*
                         * Mark the remote cpu in the pending mask, so
                         * it can avoid reprogramming the cpu local
                         * timer in tick_broadcast_oneshot_control().
                         */
                        cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
                } else if (td->evtdev->next_event.tv64 < next_event.tv64) {
                        next_event.tv64 = td->evtdev->next_event.tv64;
                        next_cpu = cpu;
                }
        }

        /*
         * Remove the current cpu from the pending mask. The event is
         * delivered immediately in tick_do_broadcast() !
         */
        cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

        /* Take care of enforced broadcast requests */
        cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
        cpumask_clear(tick_broadcast_force_mask);

        /*
         * Sanity check. Catch the case where we try to broadcast to
         * offline cpus.
         */
        if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
                cpumask_and(tmpmask, tmpmask, cpu_online_mask);

        /*
         * Wakeup the cpus which have an expired event.
         */
        bc_local = tick_do_broadcast(tmpmask);

        /*
         * Two reasons for reprogram:
         *
         * - The global event did not expire any CPU local
         * events. This happens in dyntick mode, as the maximum PIT
         * delta is quite small.
         *
         * - There are pending events on sleeping CPUs which were not
         * in the event mask
         */
        if (next_event.tv64 != KTIME_MAX)
                tick_broadcast_set_event(dev, next_cpu, next_event);

        raw_spin_unlock(&tick_broadcast_lock);

        if (bc_local) {
                td = this_cpu_ptr(&tick_cpu_device);
                td->evtdev->event_handler(td->evtdev);
        }
}
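
/*
 * For a hrtimer based broadcast device the "broadcast timer" runs on
 * one of the CPUs. Return -EBUSY if @cpu owns it and an event is
 * armed, as that CPU must not go deep idle.
 */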
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
        if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
                return 0;
        if (bc->next_event.tv64 == KTIME_MAX)
                return 0;
        return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
                                     struct clock_event_device *dev)
{
        /*
         * For hrtimer based broadcasting we cannot shutdown the cpu
         * local device if our own event is the first one to expire or
         * if we own the broadcast timer.
         */
        if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
                if (broadcast_needs_cpu(bc, smp_processor_id()))
                        return;
                if (dev->next_event.tv64 < bc->next_event.tv64)
                        return;
        }
        clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
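
/*
 * Add the CPU to or remove it from the oneshot broadcast machinery on
 * TICK_BROADCAST_ENTER/EXIT. Returns -EBUSY if the CPU must not go
 * into deep idle, 0 otherwise. Called with interrupts disabled.
 */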
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
        struct clock_event_device *bc, *dev;
        int cpu, ret = 0;
        ktime_t now;

        /*
         * If there is no broadcast device, tell the caller not to go
         * into deep idle.
         */
        if (!tick_broadcast_device.evtdev)
                return -EBUSY;

        dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

        raw_spin_lock(&tick_broadcast_lock);
        bc = tick_broadcast_device.evtdev;
        cpu = smp_processor_id();

        if (state == TICK_BROADCAST_ENTER) {
                /*
                 * If the current CPU owns the hrtimer broadcast
                 * mechanism, it cannot go deep idle and we do not add
                 * the CPU to the broadcast mask. We don't have to go
                 * through the EXIT path as the local timer is not
                 * shutdown.
                 */
                ret = broadcast_needs_cpu(bc, cpu);
                if (ret)
                        goto out;

                /*
                 * If the broadcast device is in periodic mode, we
                 * return.
                 */
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                        /* If it is a hrtimer based broadcast, return busy */
                        if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
                                ret = -EBUSY;
                        goto out;
                }

                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

                        /* Conditionally shut down the local timer. */
                        broadcast_shutdown_local(bc, dev);

                        /*
                         * We only reprogram the broadcast timer if we
                         * did not mark ourself in the force mask and
                         * if the cpu local event is earlier than the
                         * broadcast event. If the current CPU is in
                         * the force mask, then we are going to be
                         * woken by the IPI right away; we return
                         * busy, so the CPU does not try to go deep
                         * idle.
                         */
                        if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
                                ret = -EBUSY;
                        } else if (dev->next_event.tv64 < bc->next_event.tv64) {
                                tick_broadcast_set_event(bc, cpu, dev->next_event);
                                /*
                                 * In case of hrtimer broadcasts the
                                 * programming might have moved the
                                 * timer to this cpu. If yes, remove
                                 * us from the broadcast mask and
                                 * return busy.
                                 */
                                ret = broadcast_needs_cpu(bc, cpu);
                                if (ret) {
                                        cpumask_clear_cpu(cpu,
                                                tick_broadcast_oneshot_mask);
                                }
                        }
                }
        } else {
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
                        /*
                         * The cpu which was handling the broadcast
                         * timer marked this cpu in the broadcast
                         * pending mask and fired the broadcast
                         * IPI. So we are going to handle the expired
                         * event anyway via the broadcast IPI
                         * handler. No need to reprogram the timer
                         * with an already expired event.
                         */
                        if (cpumask_test_and_clear_cpu(cpu,
                                                       tick_broadcast_pending_mask))
                                goto out;

                        /*
                         * Bail out if there is no next event.
                         */
                        if (dev->next_event.tv64 == KTIME_MAX)
                                goto out;
                        /*
                         * If the pending bit is not set, then we are
                         * either the CPU handling the broadcast
                         * interrupt or we got woken by something else.
                         *
                         * We are no longer in the broadcast mask, so
                         * if the cpu local expiry time is already
                         * reached, we would reprogram the cpu local
                         * timer with an already expired event.
                         *
                         * This can lead to a ping-pong when we return
                         * to idle and therefore rearm the broadcast
                         * timer before the cpu local timer was able
                         * to fire. This happens because the forced
                         * reprogramming makes sure that the event
                         * will happen in the future and depending on
                         * the min_delta setting this might be far
                         * enough out that the ping-pong starts.
                         *
                         * If the cpu local next_event has expired
                         * then we know that the broadcast timer
                         * next_event has expired as well and
                         * broadcast is about to be handled. So we
                         * avoid reprogramming and enforce that the
                         * broadcast handler, which did not run yet,
                         * will invoke the cpu local handler.
                         *
                         * We cannot call the handler directly from
                         * here, because we might be in a NOHZ phase
                         * and we did not go through the irq_enter()
                         * nohz fixups.
                         */
                        now = ktime_get();
                        if (dev->next_event.tv64 <= now.tv64) {
                                cpumask_set_cpu(cpu, tick_broadcast_force_mask);
                                goto out;
                        }
                        /*
                         * We got woken by something else. Reprogram
                         * the cpu local timer device.
                         */
                        tick_program_event(dev->next_event, 1);
                }
        }
out:
        raw_spin_unlock(&tick_broadcast_lock);
        return ret;
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}
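
/*
 * Preset the next_event of the per cpu tick devices in @mask.
 * Called with tick_broadcast_lock held.
 */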
static void tick_broadcast_init_next_event(struct cpumask *mask,
                                           ktime_t expires)
{
        struct tick_device *td;
        int cpu;

        for_each_cpu(cpu, mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev)
                        td->evtdev->next_event = expires;
        }
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
        int cpu = smp_processor_id();

        /* Set it up only once ! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = clockevent_state_periodic(bc);

                bc->event_handler = tick_handle_oneshot_broadcast;

                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
                 * oneshot_mask bits for those and program the
                 * broadcast device to fire.
                 */
                cpumask_copy(tmpmask, tick_broadcast_mask);
                cpumask_clear_cpu(cpu, tmpmask);
                cpumask_or(tick_broadcast_oneshot_mask,
                           tick_broadcast_oneshot_mask, tmpmask);

                if (was_periodic && !cpumask_empty(tmpmask)) {
                        clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
                        tick_broadcast_init_next_event(tmpmask,
                                                       tick_next_period);
                        tick_broadcast_set_event(bc, cpu, tick_next_period);
                } else
                        bc->next_event.tv64 = KTIME_MAX;
        } else {
                /*
                 * The first cpu which switches to oneshot mode sets
                 * the bit for all other cpus which are in the general
                 * (periodic) broadcast mask. So the bit is set and
                 * would prevent the first broadcast enter after this
                 * to program the bc device.
                 */
                tick_broadcast_clear_oneshot(cpu);
        }
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
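/*
 * Pull a hrtimer based broadcast device away from a dying CPU: if the
 * device was bound to @deadcpu, reprogramming it moves the broadcast
 * assignment to the CPU running this code.
 */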
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        bc = tick_broadcast_device.evtdev;

        if (bc && broadcast_needs_cpu(bc, deadcpu)) {
                /* This moves the broadcast assignment to this CPU: */
                clockevents_program_event(bc, bc->next_event, 1);
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Clear the broadcast masks for the dead cpu, but do not stop
         * the broadcast device!
         */
        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
                return -EBUSY;

        return 0;
}
#endif
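
/*
 * Allocate the cpumasks used by the broadcast machinery at early boot.
 */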
void __init tick_broadcast_init(void)
{
        zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
        zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
        zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}