// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 *
 * Copyright (c) 2011 Google, Inc.
 *
 * Author: Colin Cross <ccross@android.com>
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cpuidle.h"

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
 * cpus cannot be independently powered down, either due to
 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
 * will corrupt the gic state unless the other cpu runs a work
 * around).  Each cpu has a power state that it can enter without
 * coordinating with the other cpu (usually Wait For Interrupt, or
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC).  Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution, where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * Once all cpus are ready to enter idle, they are woken by an smp
 * cross call.  At this point, there is a chance that one of the
 * cpus will find work to do, and choose not to enter idle.  A
 * final pass is needed to guarantee that all cpus will call the
 * power state enter function at the same time.  During this pass,
 * each cpu will increment the ready counter, and continue once the
 * ready counter matches the number of online coupled cpus.  If any
 * cpu exits idle, the other cpus will decrement their counter and
 * retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for.  It is assumed that the states are indexed from
 * shallowest (highest power, lowest exit latency) to deepest
 * (lowest power, highest exit latency).  The requested_state
 * variable is not locked.  It is only written from the cpu that
 * it stores (or by the on/offlining cpu if that cpu is offline),
 * and only read after all the cpus are ready for the coupled idle
 * state and are no longer updating it.
 *
 * Three counters are used.  online_count tracks the number of cpus
 * in the coupled set that are currently or soon will be online.
 * The waiting count tracks the number of cpus that are in the
 * waiting loop, in the ready loop, or in the coupled idle state.
 * The ready count tracks the number of cpus that are in the ready
 * loop or in the coupled idle state.  The waiting and ready counts
 * are packed into the single atomic ready_waiting_counts so that
 * both can be updated in one operation.
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 * Set struct cpuidle_device.coupled_cpus to the mask of all
 * coupled cpus, usually the same as cpu_possible_mask if all cpus
 * are part of the same cluster.  The coupled_cpus mask must be
 * set in the struct cpuidle_device for each cpu.
 *
 * Set struct cpuidle_driver.safe_state_index to the index of a
 * state that is not a coupled state.  This is usually WFI.
 *
 * Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 * state that affects multiple cpus.
 *
 * Provide a struct cpuidle_state.enter function for each state
 * that affects multiple cpus.  This function is guaranteed to be
 * called on all cpus at approximately the same time.  The driver
 * should ensure that the cpus all abort together if any cpu tries
 * to abort once the function is called.  The function should return
 * with interrupts still disabled.
 *
 * An illustrative configuration sketch follows this comment.
 */
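
/*
 * Illustrative sketch of the configuration described above.  It is not
 * part of this file; the example_* names, latencies and residencies are
 * assumptions, not taken from any real driver.
 *
 *	static struct cpuidle_driver example_idle_driver = {
 *		.name = "example_idle",
 *		.owner = THIS_MODULE,
 *		.states = {
 *			[0] = {			-- safe state: per-cpu WFI
 *				.enter = example_enter_wfi,
 *				.exit_latency = 1,
 *				.target_residency = 1,
 *				.name = "WFI",
 *			},
 *			[1] = {			-- affects shared blocks
 *				.enter = example_enter_coupled,
 *				.flags = CPUIDLE_FLAG_COUPLED,
 *				.exit_latency = 5000,
 *				.target_residency = 10000,
 *				.name = "C2",
 *			},
 *		},
 *		.safe_state_index = 0,
 *		.state_count = 2,
 *	};
 *
 * Each cpu's struct cpuidle_device additionally gets the same coupled
 * mask, e.g. cpumask_copy(&dev->coupled_cpus, cpu_possible_mask), before
 * the device is registered.
 */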

/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
 * @abort_barrier: synchronizes abort of the ready loop across all cpus
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)
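
/*
 * Note on the packing above (added for clarity): the waiting count lives
 * in the low WAITING_BITS bits of ready_waiting_counts and the ready
 * count lives in the bits above them, so both counts can be read or
 * updated with a single atomic operation.  For example, with four cpus
 * waiting, two of which are also ready:
 *
 *	ready count   = 2  ->  0x0002 << WAITING_BITS
 *	waiting count = 4  ->  0x0004
 *	ready_waiting_counts = 0x00020004
 */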

#define CPUIDLE_COUPLED_NOT_IDLE (-1)

static DEFINE_PER_CPU(call_single_data_t, cpuidle_coupled_poke_cb);

/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * smp_call_function_single_async with the per cpu call_single_data_t
 * struct already in use.  This prevents a deadlock where two cpus are
 * waiting for each other's call_single_data_t struct to be available
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;

/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a: atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function.  Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable must be initialized to 0 before any cpu calls
 * this function, and will be reset to 0 before any cpu returns from it.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}
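
/*
 * Usage sketch (illustrative only): a coupled state enter handler can use
 * the barrier to keep the cpus in lockstep around a step that only one cpu
 * may perform.  The example_* helpers are assumptions, not real functions:
 *
 *	static atomic_t example_barrier;	-- statically initialized to 0
 *
 *	static int example_enter_coupled(struct cpuidle_device *dev,
 *					 struct cpuidle_driver *drv, int idx)
 *	{
 *		example_cpu_prepare_power_down(dev->cpu);
 *		cpuidle_coupled_parallel_barrier(dev, &example_barrier);
 *		if (dev->cpu == 0)
 *			example_power_down_shared_blocks();
 *		cpuidle_coupled_parallel_barrier(dev, &example_barrier);
 *		return idx;
 *	}
 */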

/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}

/**
 * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
 * @drv: struct cpuidle_driver for the platform
 *
 * Returns 0 for valid state values, a negative error code otherwise:
 * * -EINVAL if the safe_state_index of any coupled state is wrongly set.
 */
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
{
	int i;

	for (i = drv->state_count - 1; i >= 0; i--) {
		if (cpuidle_state_is_coupled(drv, i) &&
		    (drv->safe_state_index == i ||
		     drv->safe_state_index < 0 ||
		     drv->safe_state_index >= drv->state_count))
			return -EINVAL;
	}

	return 0;
}
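
/*
 * Example (added for clarity, not from the original file): a driver whose
 * safe_state_index points at a state that itself has CPUIDLE_FLAG_COUPLED
 * set, or that lies outside 0..state_count-1, fails this check, because
 * the safe state must be enterable by one cpu without coordination.
 */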

/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus.  Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
				-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}
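
/*
 * Worked example (added for clarity): with three cpus online, all of them
 * waiting and all of them ready, ready_waiting_counts holds 0x00030003,
 * which equals "all" above.  atomic_add_unless() then refuses to subtract
 * MAX_WAITING_CPUS and -EINVAL is returned: once every cpu is ready, no
 * cpu may back out without going through the coupled state.
 */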

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline bool cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;

	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;

	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;

	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline bool cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;

	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
					    struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count.  Matches the write barrier in
	 * cpuidle_coupled_set_waiting.
	 */
	smp_rmb();

	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}
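
/*
 * Example (added for clarity): if cpu0 requested state 2 and cpu1
 * requested state 1, the loop above returns 1, the deepest state that
 * every online coupled cpu agreed to, given that states are indexed from
 * shallowest to deepest.
 */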

static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;

	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 *
 * If cpuidle_coupled_poke_pending is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: the current cpu, which is not poked
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
					struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu(cpu, &coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
					    struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}

static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}

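/*
 * Rough flow of cpuidle_enter_state_coupled() below, summarized here for
 * clarity (not authoritative):
 *
 *	reset:	clear this cpu's poke bit and increment the waiting count;
 *		the last cpu in pokes everyone out of the safe state
 *	retry:	spin in the safe state until all cpus are waiting and this
 *		cpu has seen a poke; bail out if need_resched() fires
 *	ready:	increment the ready count and spin until all cpus are
 *		ready; if any cpu bails out, drop back to retry
 *	enter:	all cpus call the coupled state's enter function at
 *		approximately the same time
 */
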
/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state.  This is a two
 * stage process.  In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the driver's safe state), and wait for
 * all the other cpus to call this function.  Once all coupled cpus are idle,
 * the second stage will start.  Each coupled cpu will spin until all cpus have
 * guaranteed that they will call the target state's enter function.
 *
 * This function must be called with interrupts disabled.  It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
						    drv->safe_state_index);
		local_irq_disable();
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state.  This can race with one of the cpus
	 * exiting the waiting state due to an interrupt and
	 * decrementing waiting_count, see comment below.
	 */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu.  If this was not the poking cpu, wait
	 * for at least one poke before leaving to avoid a race where
	 * two cpus could arrive at the waiting loop at the same time,
	 * but the first of the two to arrive could skip the loop without
	 * processing the pokes from the last to arrive.
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
	       !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu))
			continue;

		if (need_resched()) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
						    drv->safe_state_index);
		local_irq_disable();
	}

	cpuidle_coupled_clear_pokes(dev->cpu);
	if (need_resched()) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * Make sure final poke status for this cpu is visible before setting
	 * cpu as ready.
	 */
	smp_wmb();

	/*
	 * All coupled cpus are probably idle.  There is a small chance that
	 * one of the other cpus just became active.  Increment the ready count,
	 * and spin until all coupled cpus have incremented the counter.  Once a
	 * cpu has incremented the ready counter, it cannot abort idle and must
	 * spin until either all cpus have incremented the ready counter, or
	 * another cpu leaves idle and decrements the waiting counter.
	 */

	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* Check if any other cpus bailed out of idle. */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;

		cpu_relax();
	}

	/*
	 * Make sure read of all cpus ready is done before reading pending pokes
	 */
	smp_rmb();

	/*
	 * There is a small chance that a cpu left and reentered idle after this
	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
	 * have sent this cpu a poke, which will still be pending after the
	 * ready loop.  The pending interrupt may be lost by the interrupt
	 * controller when entering the deep idle state.  It's not possible to
	 * clear a pending interrupt without turning interrupts on and handling
	 * it, and it's too late to turn on interrupts here, so reset the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* Wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);

	entered_state = cpuidle_enter_state(dev, drv, next_state);

	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Normal cpuidle states are expected to return with irqs enabled.
	 * That leads to an inefficiency where a cpu receiving an interrupt
	 * that brings it out of idle will process that interrupt before
	 * exiting the idle enter function and decrementing ready_count.  All
	 * other cpus will need to spin waiting for the cpu that is processing
	 * the interrupt.  If the driver returns with interrupts disabled,
	 * all other cpus will loop back into the safe idle state instead of
	 * spinning, saving power.
	 *
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause problems for drivers that
	 * exit with interrupts enabled.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle.  There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting_count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}

static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	coupled->online_count = cpumask_weight(&cpus);
}

/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init.  Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
 */
int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
	int cpu;
	struct cpuidle_device *other_dev;
	call_single_data_t *csd;
	struct cpuidle_coupled *coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return 0;

	for_each_cpu(cpu, &dev->coupled_cpus) {
		other_dev = per_cpu(cpuidle_devices, cpu);
		if (other_dev && other_dev->coupled) {
			coupled = other_dev->coupled;
			goto have_coupled;
		}
	}

	/* No existing coupled info found, create a new one */
	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;

	coupled->coupled_cpus = dev->coupled_cpus;

have_coupled:
	dev->coupled = coupled;
	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
		coupled->prevent++;

	cpuidle_coupled_update_online_cpus(coupled);

	coupled->refcnt++;

	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
	csd->func = cpuidle_coupled_handle_poke;
	csd->info = (void *)(unsigned long)dev->cpu;

	return 0;
}
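
/*
 * Registration sketch (illustrative; example_idle_devices is an assumed
 * per-cpu variable, not part of this file).  Every cpu's device carries
 * the same coupled mask, so the loop below links them all to one shared
 * struct cpuidle_coupled, created by whichever cpu registers first:
 *
 *	for_each_possible_cpu(cpu) {
 *		dev = per_cpu_ptr(&example_idle_devices, cpu);
 *		dev->cpu = cpu;
 *		cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
 *		cpuidle_register_device(dev);
 *	}
 */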

/**
 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
 * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
 * this was the last cpu in the set.
 */
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_coupled *coupled = dev->coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return;

	/* Free the shared struct only when the last cpu unregisters. */
	if (!--coupled->refcnt)
		kfree(coupled);
	dev->coupled = NULL;
}

/**
 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/* Force all cpus out of the waiting loop. */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}

/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* Force cpus out of the prevent loop. */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}

static int coupled_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev;

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (dev && dev->coupled) {
		cpuidle_coupled_update_online_cpus(dev->coupled);
		cpuidle_coupled_allow_idle(dev->coupled);
	}

	mutex_unlock(&cpuidle_lock);
	return 0;
}

static int coupled_cpu_up_prepare(unsigned int cpu)
{
	struct cpuidle_device *dev;

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (dev && dev->coupled)
		cpuidle_coupled_prevent_idle(dev->coupled);

	mutex_unlock(&cpuidle_lock);
	return 0;
}

static int __init cpuidle_coupled_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE,
					"cpuidle/coupled:prepare",
					coupled_cpu_up_prepare,
					coupled_cpu_online);
	if (ret)
		return ret;
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"cpuidle/coupled:online",
					coupled_cpu_online,
					coupled_cpu_up_prepare);
	if (ret < 0)
		cpuhp_remove_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE);
	return ret;
}
core_initcall(cpuidle_coupled_init);