/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2016 ARM Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

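/* Each suspend test thread runs this many cycles over all non-WFI idle states. */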
#define NUM_SUSPEND_CYCLE	(10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

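/*
 * Synchronization between the main thread and the per-CPU suspend threads:
 * suspend_threads_started is completed once every thread has been woken up,
 * and the last thread to finish its cycles (tracked by nb_active_threads)
 * completes suspend_threads_done.
 */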
static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
	COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
	COMPLETION_INITIALIZER(suspend_threads_done);

/*
 * We assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, since the decision is based on the
 * "enable-method" property of each CPU in the DT, but given that there is no
 * arch-specific way to check this, we assume that the DT is sensible.
 */
static int psci_ops_check(void)
{
	int migrate_type = -1;
	int cpu;

	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
		pr_warn("Missing PSCI operations, aborting tests\n");
		return -EOPNOTSUPP;
	}

	if (psci_ops.migrate_info_type)
		migrate_type = psci_ops.migrate_info_type();

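	/*
	 * A uniprocessor Trusted OS is resident on a single CPU, which cannot
	 * be powered down; the hotplug test below expects -EPERM for it.
	 */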
	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
		/* There is a UP Trusted OS, find on which core it resides. */
		for_each_online_cpu(cpu)
			if (psci_tos_resident_on(cpu)) {
				tos_resident_cpu = cpu;
				break;
			}
		if (tos_resident_cpu == -1)
			pr_warn("UP Trusted OS resides on no online CPU\n");
	}

	return 0;
}

/*
 * offlined_cpus is a temporary array but passing it as an argument avoids
 * multiple allocations.
 */
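/*
 * Returns the number of errors encountered; 0 means every CPU went down and
 * came back up as expected.
 */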
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
				     struct cpumask *offlined_cpus)
{
	int cpu;
	int err = 0;

	cpumask_clear(offlined_cpus);

	/* Try to power down all CPUs in the mask. */
	for_each_cpu(cpu, cpus) {
		int ret = cpu_down(cpu);

		/*
		 * cpu_down() checks the number of online CPUs before the TOS
		 * resident CPU.
		 */
		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
			if (ret != -EBUSY) {
				pr_err("Unexpected return code %d while trying "
				       "to power down last online CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (cpu == tos_resident_cpu) {
			if (ret != -EPERM) {
				pr_err("Unexpected return code %d while trying "
				       "to power down TOS resident CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power down CPU %d\n", ret, cpu);
			++err;
		}

		if (ret == 0)
			cpumask_set_cpu(cpu, offlined_cpus);
	}

	/* Try to power up all the CPUs that have been offlined. */
	for_each_cpu(cpu, offlined_cpus) {
		int ret = cpu_up(cpu);

		if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power up CPU %d\n", ret, cpu);
			++err;
		} else {
			cpumask_clear_cpu(cpu, offlined_cpus);
		}
	}

	/*
	 * Something went bad at some point and some CPUs could not be turned
	 * back on.
	 */
	WARN_ON(!cpumask_empty(offlined_cpus) ||
		num_online_cpus() != nb_available_cpus);

	return err;
}

static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
{
	int i;
	cpumask_var_t *cpu_groups = *pcpu_groups;

	for (i = 0; i < num; ++i)
		free_cpumask_var(cpu_groups[i]);
	kfree(cpu_groups);
}

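/*
 * Build one cpumask per group of online CPUs sharing a topology_core_cpumask
 * (on many arm/arm64 systems this corresponds to a cluster). Returns the
 * number of groups, or -ENOMEM on allocation failure.
 */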
static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
{
	int num_groups = 0;
	cpumask_var_t tmp, *cpu_groups;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
			     GFP_KERNEL);
	if (!cpu_groups) {
		free_cpumask_var(tmp);
		return -ENOMEM;
	}

	cpumask_copy(tmp, cpu_online_mask);

	while (!cpumask_empty(tmp)) {
		const struct cpumask *cpu_group =
			topology_core_cpumask(cpumask_any(tmp));

		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
			free_cpumask_var(tmp);
			free_cpu_groups(num_groups, &cpu_groups);
			return -ENOMEM;
		}
		cpumask_copy(cpu_groups[num_groups++], cpu_group);
		cpumask_andnot(tmp, tmp, cpu_group);
	}

	free_cpumask_var(tmp);
	*pcpu_groups = cpu_groups;

	return num_groups;
}

static int hotplug_tests(void)
{
	int i, nb_cpu_group, err = -ENOMEM;
	cpumask_var_t offlined_cpus, *cpu_groups;
	char *page_buf;

	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
		return err;

	nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
	if (nb_cpu_group < 0)
		goto out_free_cpus;
	page_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!page_buf)
		goto out_free_cpu_groups;

	err = 0;
	/*
	 * Of course the last CPU cannot be powered down and cpu_down() should
	 * refuse doing that.
	 */
	pr_info("Trying to turn off and on again all CPUs\n");
	err += down_and_up_cpus(cpu_online_mask, offlined_cpus);

	/*
	 * Take down CPUs by cpu group this time. When the last CPU is turned
	 * off, the cpu group itself should shut down.
	 */
	for (i = 0; i < nb_cpu_group; ++i) {
		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
						      cpu_groups[i]);
		/* Remove trailing newline. */
		page_buf[len - 1] = '\0';
		pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
			i, page_buf);
		err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
	}

	free_page((unsigned long)page_buf);
out_free_cpu_groups:
	free_cpu_groups(nb_cpu_group, &cpu_groups);
out_free_cpus:
	free_cpumask_var(offlined_cpus);
	return err;
}

static void dummy_callback(struct timer_list *unused) {}

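/*
 * Suspend the calling CPU into the cpuidle state 'index' through PSCI,
 * entering tick broadcast first if the state stops the local timer. A return
 * value equal to 'index' means the requested state was reached; a smaller
 * non-negative value means a shallower sleep (e.g. the WFI fallback); a
 * negative value is an error.
 */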
static int suspend_cpu(int index, bool broadcast)
{
	int ret;

	arch_cpu_idle_enter();

	if (broadcast) {
		/*
		 * The local timer will be shut down, we need to enter tick
		 * broadcast.
		 */
		ret = tick_broadcast_enter();
		if (ret) {
			/*
			 * In the absence of hardware broadcast mechanism,
			 * this CPU might be used to broadcast wakeups, which
			 * may be why entering tick broadcast has failed.
			 * There is little the kernel can do to work around
			 * that, so enter WFI instead (idle state 0).
			 */
			cpu_do_idle();
			ret = 0;
			goto out_arch_exit;
		}
	}

	/*
	 * Replicate the common ARM cpuidle enter function
	 * (arm_enter_idle_state).
	 */
	ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);

	if (broadcast)
		tick_broadcast_exit();

out_arch_exit:
	arch_cpu_idle_exit();

	return ret;
}

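/*
 * Per-CPU test thread: runs NUM_SUSPEND_CYCLE cycles, each attempting every
 * idle state except state 0, with a self-armed timer as the wakeup source.
 * Returns the number of suspend errors encountered.
 */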
static int suspend_test_thread(void *arg)
{
	int cpu = (long)arg;
	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
	struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv;
	/* No need for an actual callback, we just want to wake up the CPU. */
	struct timer_list wakeup_timer;

	/* Wait for the main thread to give the start signal. */
	wait_for_completion(&suspend_threads_started);

	/* Set maximum priority to preempt all other threads on this CPU. */
	if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);

	dev = this_cpu_read(cpuidle_devices);
	drv = cpuidle_get_cpu_driver(dev);

	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
		cpu, drv->state_count - 1);

	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
		int index;
		/*
		 * Test all possible states, except 0 (which is usually WFI and
		 * doesn't use PSCI).
		 */
		for (index = 1; index < drv->state_count; ++index) {
			struct cpuidle_state *state = &drv->states[index];
			bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
			int ret;

			/*
			 * Set the timer to wake this CPU up in some time (which
			 * should be largely sufficient for entering suspend).
			 * If the local tick is disabled when entering suspend,
			 * suspend_cpu() takes care of switching to a broadcast
			 * tick, so the timer will still wake us up.
			 */
			mod_timer(&wakeup_timer, jiffies +
				  usecs_to_jiffies(state->target_residency));

			/* IRQs must be disabled during suspend operations. */
			local_irq_disable();

			ret = suspend_cpu(index, broadcast);

			/*
			 * We have woken up. Re-enable IRQs to handle any
			 * pending interrupt, do not wait until the end of the
			 * loop.
			 */
			local_irq_enable();

			if (ret == index) {
				++nb_suspend;
			} else if (ret >= 0) {
				/* We did not enter the expected state. */
				++nb_shallow_sleep;
			} else {
				pr_err("Failed to suspend CPU %d: error %d "
				       "(requested state %d, cycle %d)\n",
				       cpu, ret, index, i);
				++nb_err;
			}
		}
	}

	/*
	 * Disable the timer to make sure that the timer will not trigger
	 * later.
	 */
	del_timer(&wakeup_timer);
	destroy_timer_on_stack(&wakeup_timer);

	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
		complete(&suspend_threads_done);

	/* Give up on RT scheduling and wait for termination. */
	sched_priority.sched_priority = 0;
	if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);
	for (;;) {
		/* Needs to be set first to avoid missing a wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();
	}

	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
		cpu, nb_suspend, nb_shallow_sleep, nb_err);

	return nb_err;
}

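/*
 * Spawn one suspend test thread per CPU that has a registered cpuidle device,
 * run them in parallel and collect their error counts. Returns a negative
 * errno if the test could not be set up, otherwise the total number of
 * errors.
 */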
static int suspend_tests(void)
{
	int i, cpu, err = 0;
	struct task_struct **threads;
	int nb_threads = 0;

	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
				GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/*
	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
	 * mode, as it might interfere with the suspend threads on other CPUs.
	 * This does not prevent the suspend threads from using cpuidle (only
	 * the idle tasks check this status). Take the idle lock so that
	 * the cpuidle driver and device look-up can be carried out safely.
	 */
	cpuidle_pause_and_lock();

	for_each_online_cpu(cpu) {
		struct task_struct *thread;
		/* Check that cpuidle is available on that CPU. */
		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

		if (!dev || !drv) {
			pr_warn("cpuidle not available on CPU %d, ignoring\n",
				cpu);
			continue;
		}

		thread = kthread_create_on_cpu(suspend_test_thread,
					       (void *)(long)cpu, cpu,
					       "psci_suspend_test");
		if (IS_ERR(thread))
			pr_err("Failed to create kthread on CPU %d\n", cpu);
		else
			threads[nb_threads++] = thread;
	}

	if (nb_threads < 1) {
		err = -ENODEV;
		goto out;
	}

	atomic_set(&nb_active_threads, nb_threads);

	/*
	 * Wake up the suspend threads. To avoid the main thread being preempted
	 * before all the threads have been unparked, the suspend threads will
	 * wait for the completion of suspend_threads_started.
	 */
	for (i = 0; i < nb_threads; ++i)
		wake_up_process(threads[i]);
	complete_all(&suspend_threads_started);

	wait_for_completion(&suspend_threads_done);

	/* Stop and destroy all threads, get return status. */
	for (i = 0; i < nb_threads; ++i)
		err += kthread_stop(threads[i]);
out:
	cpuidle_resume_and_unlock();
	kfree(threads);
	return err;
}

static int __init psci_checker(void)
{
	int ret;

	/*
	 * Since we're in an initcall, we assume that all the CPUs that can be
	 * onlined have been onlined.
	 *
	 * The tests assume that hotplug is enabled but nobody else is using it,
	 * otherwise the results will be unpredictable. However, since there
	 * is no userspace yet in initcalls, that should be fine, as long as
	 * no torture test is running at the same time (see Kconfig).
	 */
	nb_available_cpus = num_online_cpus();

	/* Check PSCI operations are set up and working. */
	ret = psci_ops_check();
	if (ret)
		return ret;

	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

	pr_info("Starting hotplug tests\n");
	ret = hotplug_tests();
	if (ret == 0)
		pr_info("Hotplug tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in hotplug tests\n", ret);
	else {
		pr_err("Out of memory\n");
		return ret;
	}

	pr_info("Starting suspend tests (%d cycles per state)\n",
		NUM_SUSPEND_CYCLE);
	ret = suspend_tests();
	if (ret == 0)
		pr_info("Suspend tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in suspend tests\n", ret);
	else {
		switch (ret) {
		case -ENOMEM:
			pr_err("Out of memory\n");
			break;
		case -ENODEV:
			pr_warn("Could not start suspend tests on any CPU\n");
			break;
		}
	}

	pr_info("PSCI checker completed\n");
	return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);