// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"
#include "dt_idle_genpd.h"

struct sbi_cpuidle_data {
        u32 *states;
        struct device *dev;
};

struct sbi_domain_state {
        bool available;
        u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
static bool sbi_cpuidle_pd_allow_domain_state;

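/*
 * Per-CPU domain state handling: in OSI mode the genpd ->power_off()
 * callback records the selected domain state here, and the idle entry
 * path consumes it in place of the per-CPU state parsed from DT.
 */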
static inline void sbi_set_domain_state(u32 state)
{
        struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

        data->available = true;
        data->state = state;
}

static inline u32 sbi_get_domain_state(void)
{
        struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

        return data->state;
}

static inline void sbi_clear_domain_state(void)
{
        struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

        data->available = false;
}

static inline bool sbi_is_domain_state_available(void)
{
        struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

        return data->available;
}

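/*
 * Issue the SBI HSM HART_SUSPEND ecall. For non-retentive suspend types
 * the hart loses context and restarts at resume_addr, so cpu_suspend()
 * must save and restore context; for retentive types the ecall simply
 * returns with context intact.
 */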
static int sbi_suspend_finisher(unsigned long suspend_type,
                                unsigned long resume_addr,
                                unsigned long opaque)
{
        struct sbiret ret;

        ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
                        suspend_type, resume_addr, opaque, 0, 0, 0);

        return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}

static int sbi_suspend(u32 state)
{
        if (state & SBI_HSM_SUSP_NON_RET_BIT)
                return cpu_suspend(state, sbi_suspend_finisher);
        else
                return sbi_suspend_finisher(state, 0, 0);
}

static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv, int idx)
{
        u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
        u32 state = states[idx];

        if (state & SBI_HSM_SUSP_NON_RET_BIT)
                return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
        else
                return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
                                                             idx, state);
}

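/*
 * Idle entry for CPUs attached to a PM domain: dropping the runtime PM
 * reference lets genpd pick a domain state once the last CPU in the
 * domain goes idle. The genpd calls may use RCU, which is no longer
 * watching this deep in the idle path, hence the ct_irq_enter_irqson()/
 * ct_irq_exit_irqson() brackets around them.
 */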
static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
                                         struct cpuidle_driver *drv, int idx,
                                         bool s2idle)
{
        struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
        u32 *states = data->states;
        struct device *pd_dev = data->dev;
        u32 state;
        int ret;

        ret = cpu_pm_enter();
        if (ret)
                return -1;

        /* Do runtime PM to manage a hierarchical CPU topology. */
        ct_irq_enter_irqson();
        if (s2idle)
                dev_pm_genpd_suspend(pd_dev);
        else
                pm_runtime_put_sync_suspend(pd_dev);
        ct_irq_exit_irqson();

        if (sbi_is_domain_state_available())
                state = sbi_get_domain_state();
        else
                state = states[idx];

        ret = sbi_suspend(state) ? -1 : idx;

        ct_irq_enter_irqson();
        if (s2idle)
                dev_pm_genpd_resume(pd_dev);
        else
                pm_runtime_get_sync(pd_dev);
        ct_irq_exit_irqson();

        cpu_pm_exit();

        /* Clear the domain state to start fresh when back from idle. */
        sbi_clear_domain_state();
        return ret;
}

static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
                                       struct cpuidle_driver *drv, int idx)
{
        return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}

static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
                                              struct cpuidle_driver *drv,
                                              int idx)
{
        return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}

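/*
 * CPU hotplug callbacks: hold a runtime PM reference on the genpd-attached
 * device while the CPU is online, and drop it when the CPU goes down so
 * that the PM domain may power off.
 */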
static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
        struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

        if (pd_dev)
                pm_runtime_get_sync(pd_dev);

        return 0;
}

static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
        struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

        if (pd_dev) {
                pm_runtime_put_sync(pd_dev);
                /* Clear domain state to start fresh at next online. */
                sbi_clear_domain_state();
        }

        return 0;
}

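/*
 * Install the hotplug callbacks above; this is a no-op unless the
 * hierarchical (OSI) topology setup enabled them.
 */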
static void sbi_idle_init_cpuhp(void)
{
        int err;

        if (!sbi_cpuidle_use_cpuhp)
                return;

        err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
                                        "cpuidle/sbi:online",
                                        sbi_cpuidle_cpuhp_up,
                                        sbi_cpuidle_cpuhp_down);
        if (err)
                pr_warn("Failed %d while setting up cpuhp state\n", err);
}

static const struct of_device_id sbi_cpuidle_state_match[] = {
        { .compatible = "riscv,idle-state",
          .data = sbi_cpuidle_enter_state },
        { },
};

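/*
 * The SBI HSM extension reserves the suspend-type encodings between the
 * default and platform-specific ranges, for both retentive and
 * non-retentive states; reject any state that falls in a reserved range.
 */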
static bool sbi_suspend_state_is_valid(u32 state)
{
        if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
            state < SBI_HSM_SUSPEND_RET_PLATFORM)
                return false;
        if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
            state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
                return false;
        return true;
}

static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
        int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

        if (err) {
                pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
                return err;
        }

        if (!sbi_suspend_state_is_valid(*state)) {
                pr_warn("Invalid SBI suspend state %#x\n", *state);
                return -EINVAL;
        }

        return 0;
}

static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
                                    struct sbi_cpuidle_data *data,
                                    unsigned int state_count, int cpu)
{
        /* Currently limit the hierarchical topology to be used in OSI mode. */
        if (!sbi_cpuidle_use_osi)
                return 0;

        data->dev = dt_idle_attach_cpu(cpu, "sbi");
        if (IS_ERR_OR_NULL(data->dev))
                return PTR_ERR_OR_ZERO(data->dev);

        /*
         * Use the deepest state for the CPU to trigger a potential selection
         * of a shared state for the domain; this assumes the domain states
         * are all deeper than the CPU states.
         */
        drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
        drv->states[state_count - 1].enter_s2idle =
                sbi_enter_s2idle_domain_idle_state;
        sbi_cpuidle_use_cpuhp = true;

        return 0;
}

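/*
 * Parse the DT idle states for one CPU. State index 0 is the architectural
 * WFI state, so the SBI states from DT fill indices 1..state_count-1.
 */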
static int sbi_cpuidle_dt_init_states(struct device *dev,
                                      struct cpuidle_driver *drv,
                                      unsigned int cpu,
                                      unsigned int state_count)
{
        struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
        struct device_node *state_node;
        struct device_node *cpu_node;
        u32 *states;
        int i, ret;

        cpu_node = of_cpu_device_node_get(cpu);
        if (!cpu_node)
                return -ENODEV;

        states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
        if (!states) {
                ret = -ENOMEM;
                goto fail;
        }

        /* Parse SBI specific details from state DT nodes */
        for (i = 1; i < state_count; i++) {
                state_node = of_get_cpu_state_node(cpu_node, i - 1);
                if (!state_node)
                        break;

                ret = sbi_dt_parse_state_node(state_node, &states[i]);
                of_node_put(state_node);

                if (ret)
                        goto fail;

                pr_debug("sbi-state %#x index %d\n", states[i], i);
        }
        if (i != state_count) {
                ret = -ENODEV;
                goto fail;
        }

        /* Initialize optional data, used for the hierarchical topology. */
        ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
        if (ret < 0)
                goto fail;

        /* Store states in the per-cpu struct. */
        data->states = states;

fail:
        of_node_put(cpu_node);

        return ret;
}

static void sbi_cpuidle_deinit_cpu(int cpu)
{
        struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

        dt_idle_detach_cpu(data->dev);
        sbi_cpuidle_use_cpuhp = false;
}

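/*
 * Register a cpuidle driver for one CPU: state index 0 is the architectural
 * WFI state, the remaining states are filled in from DT.
 */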
static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
        struct cpuidle_driver *drv;
        unsigned int state_count = 0;
        int ret = 0;

        drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;

        drv->name = "sbi_cpuidle";
        drv->owner = THIS_MODULE;
        drv->cpumask = (struct cpumask *)cpumask_of(cpu);

        /* RISC-V architectural WFI to be represented as state index 0. */
        drv->states[0].enter = sbi_cpuidle_enter_state;
        drv->states[0].exit_latency = 1;
        drv->states[0].target_residency = 1;
        drv->states[0].power_usage = UINT_MAX;
        strcpy(drv->states[0].name, "WFI");
        strcpy(drv->states[0].desc, "RISC-V WFI");

        /*
         * If no DT idle states are detected (ret == 0), let the driver
         * initialization fail accordingly: there is no reason to initialize
         * the idle driver if only WFI is supported, since the default
         * architectural back-end already executes WFI on idle entry.
         */
        ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
        if (ret <= 0) {
                pr_debug("HART%ld: failed to parse DT idle states\n",
                         cpuid_to_hartid_map(cpu));
                return ret ? : -ENODEV;
        }
        state_count = ret + 1; /* Include the WFI state as well */

        /* Initialize idle states from DT. */
        ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
        if (ret) {
                pr_err("HART%ld: failed to init idle states\n",
                       cpuid_to_hartid_map(cpu));
                return ret;
        }

        ret = cpuidle_register(drv, NULL);
        if (ret)
                goto deinit;

        cpuidle_cooling_register(drv);

        return 0;
deinit:
        sbi_cpuidle_deinit_cpu(cpu);
        return ret;
}

static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
        /*
         * All devices have now been attached/probed to the PM domain
         * topology, hence it's fine to allow domain states to be picked.
         */
        sbi_cpuidle_pd_allow_domain_state = true;
}

#ifdef CONFIG_DT_IDLE_GENPD

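/*
 * genpd ->power_off() callback, invoked on the CPU that is last to enter
 * idle within the domain. Recording the domain state here makes the idle
 * entry path on that CPU request the deeper, shared state from SBI.
 */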
static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
        struct genpd_power_state *state = &pd->states[pd->state_idx];
        u32 *pd_state;

        if (!state->data)
                return 0;

        if (!sbi_cpuidle_pd_allow_domain_state)
                return -EBUSY;

        /* OSI mode is enabled, set the corresponding domain state. */
        pd_state = state->data;
        sbi_set_domain_state(*pd_state);

        return 0;
}

struct sbi_pd_provider {
        struct list_head link;
        struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);

static int sbi_pd_init(struct device_node *np)
{
        struct generic_pm_domain *pd;
        struct sbi_pd_provider *pd_provider;
        struct dev_power_governor *pd_gov;
        int ret = -ENOMEM;

        pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
        if (!pd)
                goto out;

        pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
        if (!pd_provider)
                goto free_pd;

        pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

        /* Allow power off when OSI is available. */
        if (sbi_cpuidle_use_osi)
                pd->power_off = sbi_cpuidle_pd_power_off;
        else
                pd->flags |= GENPD_FLAG_ALWAYS_ON;

        /* Use governor for CPU PM domains if it has some states to manage. */
        pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

        ret = pm_genpd_init(pd, pd_gov, false);
        if (ret)
                goto free_pd_prov;

        ret = of_genpd_add_provider_simple(np, pd);
        if (ret)
                goto remove_pd;

        pd_provider->node = of_node_get(np);
        list_add(&pd_provider->link, &sbi_pd_providers);

        pr_debug("init PM domain %s\n", pd->name);
        return 0;

remove_pd:
        pm_genpd_remove(pd);
free_pd_prov:
        kfree(pd_provider);
free_pd:
        dt_idle_pd_free(pd);
out:
        pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
        return ret;
}

static void sbi_pd_remove(void)
{
        struct sbi_pd_provider *pd_provider, *it;
        struct generic_pm_domain *genpd;

        list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
                of_genpd_del_provider(pd_provider->node);

                genpd = of_genpd_remove_last(pd_provider->node);
                if (!IS_ERR(genpd))
                        kfree(genpd);

                of_node_put(pd_provider->node);
                list_del(&pd_provider->link);
                kfree(pd_provider);
        }
}

static int sbi_genpd_probe(struct device_node *np)
{
        struct device_node *node;
        int ret = 0, pd_count = 0;

        if (!np)
                return -ENODEV;

        /*
         * Parse child nodes for the "#power-domain-cells" property and
         * initialize a genpd/genpd-of-provider pair when it's found.
         */
        for_each_child_of_node(np, node) {
                if (!of_find_property(node, "#power-domain-cells", NULL))
                        continue;

                ret = sbi_pd_init(node);
                if (ret)
                        goto put_node;

                pd_count++;
        }

        /* Bail out if not using the hierarchical CPU topology. */
        if (!pd_count)
                goto no_pd;

        /* Link genpd masters/subdomains to model the CPU topology. */
        ret = dt_idle_pd_init_topology(np);
        if (ret)
                goto remove_pd;

        return 0;

put_node:
        of_node_put(node);
remove_pd:
        sbi_pd_remove();
        pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
        return ret;
}

#else

static inline int sbi_genpd_probe(struct device_node *np)
{
        return 0;
}

#endif

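/*
 * Probe: detect OSI support from the CPU DT nodes, populate the PM
 * domains, register a cpuidle driver per CPU, then install the CPU
 * hotplug callbacks.
 */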
static int sbi_cpuidle_probe(struct platform_device *pdev)
{
        int cpu, ret;
        struct cpuidle_driver *drv;
        struct cpuidle_device *dev;
        struct device_node *np, *pds_node;

        /* Detect OSI support based on CPU DT nodes */
        sbi_cpuidle_use_osi = true;
        for_each_possible_cpu(cpu) {
                np = of_cpu_device_node_get(cpu);
                if (np &&
                    of_find_property(np, "power-domains", NULL) &&
                    of_find_property(np, "power-domain-names", NULL)) {
                        continue;
                } else {
                        sbi_cpuidle_use_osi = false;
                        break;
                }
        }

        /* Populate generic power domains from DT nodes */
        pds_node = of_find_node_by_path("/cpus/power-domains");
        if (pds_node) {
                ret = sbi_genpd_probe(pds_node);
                of_node_put(pds_node);
                if (ret)
                        return ret;
        }

        /* Initialize CPU idle driver for each CPU */
        for_each_possible_cpu(cpu) {
                ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
                if (ret) {
                        pr_debug("HART%ld: idle driver init failed\n",
                                 cpuid_to_hartid_map(cpu));
                        goto out_fail;
                }
        }

        /* Setup CPU hotplug notifiers */
        sbi_idle_init_cpuhp();

        pr_info("idle driver registered for all CPUs\n");

        return 0;

out_fail:
        while (--cpu >= 0) {
                dev = per_cpu(cpuidle_devices, cpu);
                drv = cpuidle_get_cpu_driver(dev);
                cpuidle_unregister(drv);
                sbi_cpuidle_deinit_cpu(cpu);
        }

        return ret;
}

static struct platform_driver sbi_cpuidle_driver = {
        .probe = sbi_cpuidle_probe,
        .driver = {
                .name = "sbi-cpuidle",
                .sync_state = sbi_cpuidle_domain_sync_state,
        },
};

static int __init sbi_cpuidle_init(void)
{
        int ret;
        struct platform_device *pdev;

        /*
         * The SBI HSM suspend function is only available when:
         * 1) SBI version is 0.3 or higher
         * 2) SBI HSM extension is available
         */
        if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
            sbi_probe_extension(SBI_EXT_HSM) <= 0) {
                pr_info("HSM suspend not available\n");
                return 0;
        }

        ret = platform_driver_register(&sbi_cpuidle_driver);
        if (ret)
                return ret;

        pdev = platform_device_register_simple("sbi-cpuidle",
                                               -1, NULL, 0);
        if (IS_ERR(pdev)) {
                platform_driver_unregister(&sbi_cpuidle_driver);
                return PTR_ERR(pdev);
        }

        return 0;
}
device_initcall(sbi_cpuidle_init);