// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"
#include "dt_idle_genpd.h"

struct sbi_cpuidle_data {
	u32 *states;
	struct device *dev;
};

struct sbi_domain_state {
	bool available;
	u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
static bool sbi_cpuidle_pd_allow_domain_state;

static inline void sbi_set_domain_state(u32 state)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = true;
	data->state = state;
}

static inline u32 sbi_get_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->state;
}

static inline void sbi_clear_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = false;
}

static inline bool sbi_is_domain_state_available(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->available;
}

static int sbi_suspend_finisher(unsigned long suspend_type,
				unsigned long resume_addr,
				unsigned long opaque)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
			suspend_type, resume_addr, opaque, 0, 0, 0);

	return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}

static int sbi_suspend(u32 state)
{
	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return cpu_suspend(state, sbi_suspend_finisher);
	else
		return sbi_suspend_finisher(state, 0, 0);
}

static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int idx)
{
	u32 *states = __this_cpu_read(sbi_cpuidle_data.states);

	return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
}

static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
					 struct cpuidle_driver *drv, int idx,
					 bool s2idle)
{
	struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
	u32 *states = data->states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	rcu_irq_enter_irqson();
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);
	rcu_irq_exit_irqson();

	if (sbi_is_domain_state_available())
		state = sbi_get_domain_state();
	else
		state = states[idx];

	ret = sbi_suspend(state) ? -1 : idx;

	rcu_irq_enter_irqson();
	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);
	rcu_irq_exit_irqson();

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	sbi_clear_domain_state();
	return ret;
}

static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}

static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv,
					      int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}

static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}

static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
		/* Clear domain state to start fresh at next online. */
		sbi_clear_domain_state();
	}

	return 0;
}

static void sbi_idle_init_cpuhp(void)
{
	int err;

	if (!sbi_cpuidle_use_cpuhp)
		return;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/sbi:online",
					sbi_cpuidle_cpuhp_up,
					sbi_cpuidle_cpuhp_down);
	if (err)
		pr_warn("Failed %d while setting up cpuhp state\n", err);
}

static const struct of_device_id sbi_cpuidle_state_match[] = {
	{ .compatible = "riscv,idle-state",
	  .data = sbi_cpuidle_enter_state },
	{ },
};

static bool sbi_suspend_state_is_valid(u32 state)
{
	if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_RET_PLATFORM)
		return false;
	if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
		return false;
	return true;
}

static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
		return err;
	}

	if (!sbi_suspend_state_is_valid(*state)) {
		pr_warn("Invalid SBI suspend state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}

static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
				    struct sbi_cpuidle_data *data,
				    unsigned int state_count, int cpu)
{
	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (!sbi_cpuidle_use_osi)
		return 0;

	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	/*
	 * Using the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain, assumes the domain states are all
	 * deeper states.
	 */
	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle =
		sbi_enter_s2idle_domain_idle_state;
	sbi_cpuidle_use_cpuhp = true;

	return 0;
}

static int sbi_cpuidle_dt_init_states(struct device *dev,
				      struct cpuidle_driver *drv,
				      unsigned int cpu,
				      unsigned int state_count)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
	struct device_node *state_node;
	struct device_node *cpu_node;
	u32 *states;
	int i, ret;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
	if (!states) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Parse SBI specific details from state DT nodes */
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);

		if (ret)
			goto fail;

		pr_debug("sbi-state %#x index %d\n", states[i], i);
	}
	if (i != state_count) {
		ret = -ENODEV;
		goto fail;
	}

	/* Initialize optional data, used for the hierarchical topology. */
	ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		goto fail;

	/* Store states in the per-cpu struct. */
	data->states = states;

fail:
	of_node_put(cpu_node);

	return ret;
}

static void sbi_cpuidle_deinit_cpu(int cpu)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	sbi_cpuidle_use_cpuhp = false;
}

static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	unsigned int state_count = 0;
	int ret = 0;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "sbi_cpuidle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/* RISC-V architectural WFI to be represented as state index 0. */
	drv->states[0].enter = sbi_cpuidle_enter_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "RISC-V WFI");

	/*
	 * If no DT idle states are detected (ret == 0), let the driver
	 * initialization fail accordingly: there is no reason to register
	 * the idle driver when only WFI is supported, since the default
	 * architectural back-end already executes WFI on idle entry.
	 */
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
	if (ret <= 0) {
		pr_debug("HART%ld: failed to parse DT idle states\n",
			 cpuid_to_hartid_map(cpu));
		return ret ? : -ENODEV;
	}
	state_count = ret + 1; /* Include the WFI state as well */

	/* Initialize idle states from DT. */
	ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
	if (ret) {
		pr_err("HART%ld: failed to init idle states\n",
		       cpuid_to_hartid_map(cpu));
		return ret;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	sbi_cpuidle_deinit_cpu(cpu);
	return ret;
}

static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain
	 * topology, hence it's fine to allow domain states to be picked.
	 */
	sbi_cpuidle_pd_allow_domain_state = true;
}

#ifdef CONFIG_DT_IDLE_GENPD

static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!sbi_cpuidle_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	sbi_set_domain_state(*pd_state);

	return 0;
}

struct sbi_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);

static int sbi_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct sbi_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI is available. */
	if (sbi_cpuidle_use_osi)
		pd->power_off = sbi_cpuidle_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &sbi_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

static void sbi_pd_remove(void)
{
	struct sbi_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

static int sbi_genpd_probe(struct device_node *np)
{
	struct device_node *node;
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node(np, node) {
		if (!of_find_property(node, "#power-domain-cells", NULL))
			continue;

		ret = sbi_pd_init(node);
		if (ret)
			goto put_node;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	return 0;

put_node:
	of_node_put(node);
remove_pd:
	sbi_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	return ret;
}

#else

static inline int sbi_genpd_probe(struct device_node *np)
{
	return 0;
}

#endif

static int sbi_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;
	struct device_node *np, *pds_node;

	/* Detect OSI support based on CPU DT nodes */
	sbi_cpuidle_use_osi = true;
	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		if (!np ||
		    !of_find_property(np, "power-domains", NULL) ||
		    !of_find_property(np, "power-domain-names", NULL)) {
			of_node_put(np);
			sbi_cpuidle_use_osi = false;
			break;
		}
		of_node_put(np);
	}

	/* Populate generic power domains from DT nodes */
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
		if (ret)
			return ret;
	}

	/* Initialize CPU idle driver for each CPU */
	for_each_possible_cpu(cpu) {
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
		if (ret) {
			pr_debug("HART%ld: idle driver init failed\n",
				 cpuid_to_hartid_map(cpu));
			goto out_fail;
		}
	}

	/* Set up CPU hotplug notifiers */
	sbi_idle_init_cpuhp();

	pr_info("idle driver registered for all CPUs\n");

	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		sbi_cpuidle_deinit_cpu(cpu);
	}

	return ret;
}

static struct platform_driver sbi_cpuidle_driver = {
	.probe = sbi_cpuidle_probe,
	.driver = {
		.name = "sbi-cpuidle",
		.sync_state = sbi_cpuidle_domain_sync_state,
	},
};

static int __init sbi_cpuidle_init(void)
{
	int ret;
	struct platform_device *pdev;

	/*
	 * The SBI HSM suspend function is only available when:
	 * 1) SBI version is 0.3 or higher
	 * 2) SBI HSM extension is available
	 */
	if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
	    sbi_probe_extension(SBI_EXT_HSM) <= 0) {
		pr_info("HSM suspend not available\n");
		return 0;
	}

	ret = platform_driver_register(&sbi_cpuidle_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("sbi-cpuidle",
					       -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&sbi_cpuidle_driver);
		return PTR_ERR(pdev);
	}

	return 0;
}
device_initcall(sbi_cpuidle_init);