/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpu_pm.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
#include <asm/suspend.h>

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

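/*
 * Set the re-entry address for the given CPU in the given cluster: the
 * pointer is stored as a physical address in the entry vector table and
 * cleaned out to main memory so that a CPU entering through
 * mcpm_entry_point with its cache still off will pick it up.
 */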
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
        unsigned long val = ptr ? virt_to_phys(ptr) : 0;
        mcpm_entry_vectors[cluster][cpu] = val;
        sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

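/*
 * Arrange for a very early "poke": when the given CPU next enters the
 * low-level MCPM boot code, it writes @poke_val to the physical address
 * @poke_phys_addr before its MMU and caches are enabled. The
 * address/value pair is cleaned to main memory so the incoming CPU can
 * read it uncached.
 */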
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
                         unsigned long poke_phys_addr, unsigned long poke_val)
{
        unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
        poke[0] = poke_phys_addr;
        poke[1] = poke_val;
        __sync_cache_range_w(poke, 2 * sizeof(*poke));
}

static const struct mcpm_platform_ops *platform_ops;

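/*
 * Install the platform-specific backend that implements the actual
 * power control. Only one backend may be registered; later attempts
 * return -EBUSY.
 */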
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
        if (platform_ops)
                return -EBUSY;
        platform_ops = ops;
        return 0;
}

bool mcpm_is_available(void)
{
        return (platform_ops) ? true : false;
}

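/*
 * Illustrative sketch (not part of this file) of how a platform backend
 * typically hooks into the registration interface above: it fills in a
 * struct mcpm_platform_ops and registers it from its own init code,
 * together with the low-level power_up_setup handler passed to
 * mcpm_sync_init(). All my_* names below are hypothetical placeholders.
 *
 *      static const struct mcpm_platform_ops my_pm_ops = {
 *              .cpu_powerup               = my_cpu_powerup,
 *              .cluster_powerup           = my_cluster_powerup,
 *              .cpu_powerdown_prepare     = my_cpu_powerdown_prepare,
 *              .cluster_powerdown_prepare = my_cluster_powerdown_prepare,
 *              .cpu_cache_disable         = my_cpu_cache_disable,
 *              .cluster_cache_disable     = my_cluster_cache_disable,
 *              .cpu_is_up                 = my_cpu_is_up,
 *              .cluster_is_up             = my_cluster_is_up,
 *              .wait_for_powerdown        = my_wait_for_powerdown,
 *      };
 *
 *      static int __init my_mcpm_init(void)
 *      {
 *              int ret = mcpm_platform_register(&my_pm_ops);
 *              if (!ret)
 *                      ret = mcpm_sync_init(my_power_up_setup);
 *              return ret;
 *      }
 */
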
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

static inline bool mcpm_cluster_unused(unsigned int cluster)
{
        int i, cnt;
        for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
                cnt |= mcpm_cpu_use_count[cluster][i];
        return !cnt;
}

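/*
 * Bring the given CPU (and, if needed, its cluster) out of reset so it
 * (re)enters the kernel through mcpm_entry_point. Must be called from a
 * sleepable context with IRQs enabled; the use counts resolve races
 * with concurrent power-down requests under mcpm_lock.
 */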
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
        bool cpu_is_down, cluster_is_down;
        int ret = 0;

        if (!platform_ops)
                return -EUNATCH; /* try not to shadow power_up errors */
        might_sleep();

        /* backward compatibility callback */
        if (platform_ops->power_up)
                return platform_ops->power_up(cpu, cluster);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);

        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
         */
        local_irq_disable();
        arch_spin_lock(&mcpm_lock);

        cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
        cluster_is_down = mcpm_cluster_unused(cluster);

        mcpm_cpu_use_count[cluster][cpu]++;
        /*
         * The only possible values are:
         * 0 = CPU down
         * 1 = CPU (still) up
         * 2 = CPU requested to be up before it had a chance
         *     to actually make itself down.
         * Any other value is a bug.
         */
        BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
               mcpm_cpu_use_count[cluster][cpu] != 2);

        if (cluster_is_down)
                ret = platform_ops->cluster_powerup(cluster);
        if (cpu_is_down && !ret)
                ret = platform_ops->cpu_powerup(cpu, cluster);

        arch_spin_unlock(&mcpm_lock);
        local_irq_enable();
        return ret;
}

typedef void (*phys_reset_t)(unsigned long);

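/*
 * Power down the calling CPU, and the cluster as well if this CPU is
 * the "last man" in it. Called with IRQs disabled; on success this
 * function does not return. If a concurrent power_up request prevents
 * the power down, the CPU re-enters the kernel through mcpm_entry_point
 * as if it had just been released from reset.
 */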
void mcpm_cpu_power_down(void)
{
        unsigned int mpidr, cpu, cluster;
        bool cpu_going_down, last_man;
        phys_reset_t phys_reset;

        if (WARN_ON_ONCE(!platform_ops))
                return;
        BUG_ON(!irqs_disabled());

        /*
         * Do this before calling into the power_down method,
         * as it might not always be safe to do afterwards.
         */
        setup_mm_for_reboot();

        /* backward compatibility callback */
        if (platform_ops->power_down) {
                platform_ops->power_down();
                goto not_dead;
        }

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);

        __mcpm_cpu_going_down(cpu, cluster);

        arch_spin_lock(&mcpm_lock);
        BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

        mcpm_cpu_use_count[cluster][cpu]--;
        BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
               mcpm_cpu_use_count[cluster][cpu] != 1);
        cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
        last_man = mcpm_cluster_unused(cluster);

        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                platform_ops->cpu_powerdown_prepare(cpu, cluster);
                platform_ops->cluster_powerdown_prepare(cluster);
                arch_spin_unlock(&mcpm_lock);
                platform_ops->cluster_cache_disable();
                __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        } else {
                if (cpu_going_down)
                        platform_ops->cpu_powerdown_prepare(cpu, cluster);
                arch_spin_unlock(&mcpm_lock);
                /*
                 * If cpu_going_down is false here, that means a power_up
                 * request raced ahead of us. Even if we do not want to
                 * shut this CPU down, the caller still expects execution
                 * to return through the system resume entry path, like
                 * when the WFI is aborted due to a new IRQ or the like..
                 * So let's continue with cache cleaning in all cases.
                 */
                platform_ops->cpu_cache_disable();
        }

        __mcpm_cpu_down(cpu, cluster);

        /* Now we are prepared for power-down, do it: */
        if (cpu_going_down)
                wfi();

not_dead:
        /*
         * It is possible for a power_up request to happen concurrently
         * with a power_down request for the same CPU. In this case the
         * CPU might not be able to actually enter a powered down state
         * with the WFI instruction if the power_up request has removed
         * the required reset condition. We must perform a re-entry in
         * the kernel as if the power_up method just had deasserted reset
         * on the CPU.
         */
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));

        /* should never get here */
        BUG();
}

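/*
 * Wait for the platform backend to confirm that the given CPU has
 * really reached a powered-down or reset state. Needed by callers
 * (such as the CPU hotplug kill path) that must not touch the CPU's
 * hardware until it is safely down.
 */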
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
        int ret;

        if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
                return -EUNATCH;

        ret = platform_ops->wait_for_powerdown(cpu, cluster);
        if (ret)
                pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
                        __func__, cpu, cluster, ret);

        return ret;
}

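/*
 * Suspend the calling CPU through the same path as
 * mcpm_cpu_power_down(). @expected_residency is a hint, in
 * microseconds, of how long the CPU is expected to stay down; only the
 * legacy suspend callback makes use of it.
 */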
void mcpm_cpu_suspend(u64 expected_residency)
{
        if (WARN_ON_ONCE(!platform_ops))
                return;

        /* backward compatibility callback */
        if (platform_ops->suspend) {
                phys_reset_t phys_reset;
                BUG_ON(!irqs_disabled());
                setup_mm_for_reboot();
                platform_ops->suspend(expected_residency);
                phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
                phys_reset(virt_to_phys(mcpm_entry_point));
                BUG();
        }

        /* Some platforms might have to enable special resume modes, etc. */
        if (platform_ops->cpu_suspend_prepare) {
                unsigned int mpidr = read_cpuid_mpidr();
                unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                arch_spin_lock(&mcpm_lock);
                platform_ops->cpu_suspend_prepare(cpu, cluster);
                arch_spin_unlock(&mcpm_lock);
        }
        mcpm_cpu_power_down();
}

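/*
 * Housekeeping performed by a CPU once it is fully back up in the
 * kernel: update the use counts and give the platform backend a chance
 * to finalise CPU and cluster bring-up.
 */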
int mcpm_cpu_powered_up(void)
{
        unsigned int mpidr, cpu, cluster;
        bool cpu_was_down, first_man;
        unsigned long flags;

        if (!platform_ops)
                return -EUNATCH;

        /* backward compatibility callback */
        if (platform_ops->powered_up) {
                platform_ops->powered_up();
                return 0;
        }

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        local_irq_save(flags);
        arch_spin_lock(&mcpm_lock);

        cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
        first_man = mcpm_cluster_unused(cluster);

        if (first_man && platform_ops->cluster_is_up)
                platform_ops->cluster_is_up(cluster);
        if (cpu_was_down)
                mcpm_cpu_use_count[cluster][cpu] = 1;
        if (platform_ops->cpu_is_up)
                platform_ops->cpu_is_up(cpu, cluster);

        arch_spin_unlock(&mcpm_lock);
        local_irq_restore(flags);

        return 0;
}

#ifdef CONFIG_ARM_CPU_SUSPEND

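/*
 * Helper for mcpm_loopback(), run as the cpu_suspend() "power down"
 * method: it points the entry vector at cpu_resume, switches to the
 * identity mapping, walks this CPU through the MCPM teardown states,
 * disables its cache via the provided callback, and then "resets" into
 * mcpm_entry_point so the CPU comes back through the normal low-level
 * bring-up and resume path.
 */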
static int __init nocache_trampoline(unsigned long _arg)
{
        void (*cache_disable)(void) = (void *)_arg;
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        phys_reset_t phys_reset;

        mcpm_set_entry_vector(cpu, cluster, cpu_resume);
        setup_mm_for_reboot();

        __mcpm_cpu_going_down(cpu, cluster);
        BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
        cache_disable();
        __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        __mcpm_cpu_down(cpu, cluster);

        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));
        BUG();
}

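/*
 * Soft-restart the calling CPU once through the low-level MCPM entry
 * code, as if it had been powered down and back up. This lets the
 * first-man cluster setup provided via mcpm_sync_init() (for instance
 * enabling CCI snoops) run for the boot cluster before other CPUs are
 * brought up.
 */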
int __init mcpm_loopback(void (*cache_disable)(void))
{
        int ret;

        /*
         * We're going to soft-restart the current CPU through the
         * low-level MCPM code by leveraging the suspend/resume
         * infrastructure. Let's play it safe by using cpu_pm_enter()
         * in case the CPU init code path resets the VFP or similar.
         */
        local_irq_disable();
        local_fiq_disable();
        ret = cpu_pm_enter();
        if (!ret) {
                ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
                cpu_pm_exit();
        }
        local_fiq_enable();
        local_irq_enable();
        if (ret)
                pr_err("%s returned %d\n", __func__, ret);
        return ret;
}

#endif

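/*
 * What follows is the cluster state machine shared with the low-level
 * entry code: it coordinates "last man" cluster teardown and "first
 * man" bring-up (see Documentation/arm/cluster-pm-race-avoidance.txt).
 * Every access uses explicit cache maintenance because some of the
 * participating CPUs run with their caches disabled.
 */
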
struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
        dmb();
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
        sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
        dmb();
        mcpm_sync.clusters[cluster].cluster = state;
        sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
        sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete. CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
        unsigned int i;
        struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

        /* Warn inbound CPUs that the cluster is being torn down: */
        c->cluster = CLUSTER_GOING_DOWN;
        sync_cache_w(&c->cluster);

        /* Back out if the inbound cluster is already in the critical region: */
        sync_cache_r(&c->inbound);
        if (c->inbound == INBOUND_COMING_UP)
                goto abort;

        /*
         * Wait for all CPUs to get out of the GOING_DOWN state, so that local
         * teardown is complete on each CPU before tearing down the cluster.
         *
         * If any CPU has been woken up again from the DOWN state, then we
         * shouldn't be taking the cluster down at all: abort in that case.
         */
        sync_cache_r(&c->cpus);
        for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
                int cpustate;

                if (i == cpu)
                        continue;

                while (1) {
                        cpustate = c->cpus[i].cpu;
                        if (cpustate != CPU_GOING_DOWN)
                                break;

                        wfe();
                        sync_cache_r(&c->cpus[i].cpu);
                }

                switch (cpustate) {
                case CPU_DOWN:
                        continue;

                default:
                        goto abort;
                }
        }

        return true;

abort:
        __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
        return false;
}

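/*
 * Read back the current state of the given cluster (CLUSTER_UP,
 * CLUSTER_GOING_DOWN or CLUSTER_DOWN), forcing the value to be
 * re-fetched from main memory first.
 */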
int __mcpm_cluster_state(unsigned int cluster)
{
        sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
        return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_power_up_setup_phys;

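/*
 * Initialise the synchronisation state: all CPUs and clusters start as
 * down except those currently online, and the physical address of the
 * platform's power_up_setup routine is published for the low-level
 * entry code.
 */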
int __init mcpm_sync_init(
        void (*power_up_setup)(unsigned int affinity_level))
{
        unsigned int i, j, mpidr, this_cluster;

        BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
        BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

        /*
         * Set initial CPU and cluster states.
         * Only one cluster is assumed to be active at this point.
         */
        for (i = 0; i < MAX_NR_CLUSTERS; i++) {
                mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
                mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
                for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
                        mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
        }
        mpidr = read_cpuid_mpidr();
        this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        for_each_online_cpu(i) {
                mcpm_cpu_use_count[this_cluster][i] = 1;
                mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
        }
        mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
        sync_cache_w(&mcpm_sync);

        if (power_up_setup) {
                mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
                sync_cache_w(&mcpm_power_up_setup_phys);
        }

        return 0;
}