PM / Domains: Rename GPD_STATE_WAIT_PARENT to GPD_STATE_WAIT_MASTER
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/sched.h>
17#include <linux/suspend.h>
 18
19static LIST_HEAD(gpd_list);
20static DEFINE_MUTEX(gpd_list_lock);
21
22#ifdef CONFIG_PM
23
24static struct generic_pm_domain *dev_to_genpd(struct device *dev)
25{
26 if (IS_ERR_OR_NULL(dev->pm_domain))
27 return ERR_PTR(-EINVAL);
28
 29 return pd_to_genpd(dev->pm_domain);
 30}
 31
 32static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 33{
34 bool ret = false;
35
36 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
37 ret = !!atomic_dec_and_test(&genpd->sd_count);
38
39 return ret;
40}
41
42static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
43{
44 atomic_inc(&genpd->sd_count);
45 smp_mb__after_atomic_inc();
46}
47
48static void genpd_acquire_lock(struct generic_pm_domain *genpd)
49{
50 DEFINE_WAIT(wait);
51
52 mutex_lock(&genpd->lock);
53 /*
54 * Wait for the domain to transition into either the active,
55 * or the power off state.
56 */
57 for (;;) {
58 prepare_to_wait(&genpd->status_wait_queue, &wait,
59 TASK_UNINTERRUPTIBLE);
60 if (genpd->status == GPD_STATE_ACTIVE
61 || genpd->status == GPD_STATE_POWER_OFF)
62 break;
63 mutex_unlock(&genpd->lock);
64
65 schedule();
66
67 mutex_lock(&genpd->lock);
68 }
69 finish_wait(&genpd->status_wait_queue, &wait);
70}
71
72static void genpd_release_lock(struct generic_pm_domain *genpd)
73{
74 mutex_unlock(&genpd->lock);
75}
76
77static void genpd_set_active(struct generic_pm_domain *genpd)
78{
79 if (genpd->resume_count == 0)
80 genpd->status = GPD_STATE_ACTIVE;
81}
82
 83/**
 84 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
85 * @genpd: PM domain to power up.
86 *
 87 * Restore power to @genpd and all of its masters so that it is possible to
88 * resume a device belonging to it.
89 */
90int __pm_genpd_poweron(struct generic_pm_domain *genpd)
91 __releases(&genpd->lock) __acquires(&genpd->lock)
 92{
 93 struct gpd_link *link;
 94 DEFINE_WAIT(wait);
95 int ret = 0;
96
 97 /* If the domain's master is being waited for, we have to wait too. */
98 for (;;) {
99 prepare_to_wait(&genpd->status_wait_queue, &wait,
100 TASK_UNINTERRUPTIBLE);
 101 if (genpd->status != GPD_STATE_WAIT_MASTER)
102 break;
103 mutex_unlock(&genpd->lock);
 104
105 schedule();
106
107 mutex_lock(&genpd->lock);
108 }
109 finish_wait(&genpd->status_wait_queue, &wait);
 110
 111 if (genpd->status == GPD_STATE_ACTIVE
 112 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
 113 return 0;
 114
115 if (genpd->status != GPD_STATE_POWER_OFF) {
116 genpd_set_active(genpd);
 117 return 0;
118 }
119
120 /*
121 * The list is guaranteed not to change while the loop below is being
122 * executed, unless one of the masters' .power_on() callbacks fiddles
123 * with it.
124 */
125 list_for_each_entry(link, &genpd->slave_links, slave_node) {
126 genpd_sd_counter_inc(link->master);
 127 genpd->status = GPD_STATE_WAIT_MASTER;
 128
 129 mutex_unlock(&genpd->lock);
 130
 131 ret = pm_genpd_poweron(link->master);
132
133 mutex_lock(&genpd->lock);
134
135 /*
 136 * The "wait for master" status is guaranteed not to change
 137 * while the master is powering on.
138 */
139 genpd->status = GPD_STATE_POWER_OFF;
140 wake_up_all(&genpd->status_wait_queue);
141 if (ret) {
142 genpd_sd_counter_dec(link->master);
 143 goto err;
 144 }
145 }
146
 147 if (genpd->power_on) {
 148 ret = genpd->power_on(genpd);
149 if (ret)
150 goto err;
 151 }
 152
153 genpd_set_active(genpd);
154
 155 return 0;
156
157 err:
158 list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
159 genpd_sd_counter_dec(link->master);
 160
161 return ret;
162}
163
164/**
 165 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
166 * @genpd: PM domain to power up.
167 */
168int pm_genpd_poweron(struct generic_pm_domain *genpd)
169{
170 int ret;
171
172 mutex_lock(&genpd->lock);
173 ret = __pm_genpd_poweron(genpd);
174 mutex_unlock(&genpd->lock);
175 return ret;
176}
177
178#endif /* CONFIG_PM */
179
180#ifdef CONFIG_PM_RUNTIME
181
182/**
183 * __pm_genpd_save_device - Save the pre-suspend state of a device.
184 * @dle: Device list entry of the device to save the state of.
185 * @genpd: PM domain the device belongs to.
186 */
187static int __pm_genpd_save_device(struct dev_list_entry *dle,
188 struct generic_pm_domain *genpd)
 189 __releases(&genpd->lock) __acquires(&genpd->lock)
190{
191 struct device *dev = dle->dev;
192 struct device_driver *drv = dev->driver;
193 int ret = 0;
194
195 if (dle->need_restore)
196 return 0;
197
198 mutex_unlock(&genpd->lock);
199
200 if (drv && drv->pm && drv->pm->runtime_suspend) {
201 if (genpd->start_device)
202 genpd->start_device(dev);
203
204 ret = drv->pm->runtime_suspend(dev);
205
206 if (genpd->stop_device)
207 genpd->stop_device(dev);
208 }
209
210 mutex_lock(&genpd->lock);
211
212 if (!ret)
213 dle->need_restore = true;
214
215 return ret;
216}
217
218/**
219 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
220 * @dle: Device list entry of the device to restore the state of.
221 * @genpd: PM domain the device belongs to.
222 */
223static void __pm_genpd_restore_device(struct dev_list_entry *dle,
224 struct generic_pm_domain *genpd)
 225 __releases(&genpd->lock) __acquires(&genpd->lock)
226{
227 struct device *dev = dle->dev;
228 struct device_driver *drv = dev->driver;
229
230 if (!dle->need_restore)
231 return;
232
233 mutex_unlock(&genpd->lock);
234
235 if (drv && drv->pm && drv->pm->runtime_resume) {
236 if (genpd->start_device)
237 genpd->start_device(dev);
238
239 drv->pm->runtime_resume(dev);
240
241 if (genpd->stop_device)
242 genpd->stop_device(dev);
243 }
244
245 mutex_lock(&genpd->lock);
246
247 dle->need_restore = false;
248}
249
250/**
251 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
252 * @genpd: PM domain to check.
253 *
254 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 255 * a "power off" operation, which means that a "power on" has occurred in the
256 * meantime, or if its resume_count field is different from zero, which means
257 * that one of its devices has been resumed in the meantime.
258 */
259static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
260{
 261 return genpd->status == GPD_STATE_WAIT_MASTER
 262 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
263}
264
265/**
266 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 267 * @genpd: PM domain to power off.
268 *
269 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
270 * before.
271 */
 272void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
273{
274 if (!work_pending(&genpd->power_off_work))
275 queue_work(pm_wq, &genpd->power_off_work);
276}
277
278/**
279 * pm_genpd_poweroff - Remove power from a given PM domain.
280 * @genpd: PM domain to power down.
281 *
282 * If all of the @genpd's devices have been suspended and all of its subdomains
283 * have been powered down, run the runtime suspend callbacks provided by all of
284 * the @genpd's devices' drivers and remove power from @genpd.
285 */
286static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 287 __releases(&genpd->lock) __acquires(&genpd->lock)
 288{
 289 struct dev_list_entry *dle;
 290 struct gpd_link *link;
 291 unsigned int not_suspended;
 292 int ret = 0;
 293
294 start:
295 /*
296 * Do not try to power off the domain in the following situations:
297 * (1) The domain is already in the "power off" state.
 298 * (2) The domain is waiting for its master to power up.
 299 * (3) One of the domain's devices is being resumed right now.
 300 * (4) System suspend is in progress.
 301 */
 302 if (genpd->status == GPD_STATE_POWER_OFF
 303 || genpd->status == GPD_STATE_WAIT_MASTER
 304 || genpd->resume_count > 0 || genpd->prepared_count > 0)
305 return 0;
306
 307 if (atomic_read(&genpd->sd_count) > 0)
308 return -EBUSY;
309
310 not_suspended = 0;
311 list_for_each_entry(dle, &genpd->dev_list, node)
312 if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
313 not_suspended++;
314
315 if (not_suspended > genpd->in_progress)
316 return -EBUSY;
317
318 if (genpd->poweroff_task) {
319 /*
320 * Another instance of pm_genpd_poweroff() is executing
321 * callbacks, so tell it to start over and return.
322 */
323 genpd->status = GPD_STATE_REPEAT;
324 return 0;
325 }
326
327 if (genpd->gov && genpd->gov->power_down_ok) {
328 if (!genpd->gov->power_down_ok(&genpd->domain))
329 return -EAGAIN;
330 }
331
 332 genpd->status = GPD_STATE_BUSY;
 333 genpd->poweroff_task = current;
 334
 335 list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
336 ret = atomic_read(&genpd->sd_count) == 0 ?
337 __pm_genpd_save_device(dle, genpd) : -EBUSY;
338
339 if (genpd_abort_poweroff(genpd))
340 goto out;
341
342 if (ret) {
343 genpd_set_active(genpd);
344 goto out;
345 }
 346
347 if (genpd->status == GPD_STATE_REPEAT) {
348 genpd->poweroff_task = NULL;
349 goto start;
350 }
351 }
 352
353 if (genpd->power_off) {
354 if (atomic_read(&genpd->sd_count) > 0) {
355 ret = -EBUSY;
356 goto out;
357 }
 358
 359 /*
360 * If sd_count > 0 at this point, one of the subdomains hasn't
361 * managed to call pm_genpd_poweron() for the master yet after
362 * incrementing it. In that case pm_genpd_poweron() will wait
363 * for us to drop the lock, so we can call .power_off() and let
364 * the pm_genpd_poweron() restore power for us (this shouldn't
365 * happen very often).
366 */
367 ret = genpd->power_off(genpd);
368 if (ret == -EBUSY) {
369 genpd_set_active(genpd);
370 goto out;
371 }
372 }
 373
 374 genpd->status = GPD_STATE_POWER_OFF;
 375
376 list_for_each_entry(link, &genpd->slave_links, slave_node) {
377 genpd_sd_counter_dec(link->master);
378 genpd_queue_power_off_work(link->master);
379 }
 380
381 out:
382 genpd->poweroff_task = NULL;
383 wake_up_all(&genpd->status_wait_queue);
384 return ret;
385}
386
387/**
388 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
389 * @work: Work structure used for scheduling the execution of this function.
390 */
391static void genpd_power_off_work_fn(struct work_struct *work)
392{
393 struct generic_pm_domain *genpd;
394
395 genpd = container_of(work, struct generic_pm_domain, power_off_work);
396
 397 genpd_acquire_lock(genpd);
 398 pm_genpd_poweroff(genpd);
 399 genpd_release_lock(genpd);
400}
401
402/**
403 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
404 * @dev: Device to suspend.
405 *
406 * Carry out a runtime suspend of a device under the assumption that its
407 * pm_domain field points to the domain member of an object of type
408 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
409 */
410static int pm_genpd_runtime_suspend(struct device *dev)
411{
412 struct generic_pm_domain *genpd;
413
414 dev_dbg(dev, "%s()\n", __func__);
415
416 genpd = dev_to_genpd(dev);
417 if (IS_ERR(genpd))
418 return -EINVAL;
419
420 if (genpd->stop_device) {
421 int ret = genpd->stop_device(dev);
422 if (ret)
 423 return ret;
 424 }
 425
 426 mutex_lock(&genpd->lock);
427 genpd->in_progress++;
428 pm_genpd_poweroff(genpd);
429 genpd->in_progress--;
 430 mutex_unlock(&genpd->lock);
431
432 return 0;
433}
434
435/**
436 * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
437 * @dev: Device to resume.
438 * @genpd: PM domain the device belongs to.
439 */
440static void __pm_genpd_runtime_resume(struct device *dev,
441 struct generic_pm_domain *genpd)
442{
443 struct dev_list_entry *dle;
444
445 list_for_each_entry(dle, &genpd->dev_list, node) {
446 if (dle->dev == dev) {
447 __pm_genpd_restore_device(dle, genpd);
448 break;
449 }
450 }
451}
452
453/**
454 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
455 * @dev: Device to resume.
456 *
457 * Carry out a runtime resume of a device under the assumption that its
458 * pm_domain field points to the domain member of an object of type
459 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
460 */
461static int pm_genpd_runtime_resume(struct device *dev)
462{
463 struct generic_pm_domain *genpd;
 464 DEFINE_WAIT(wait);
465 int ret;
466
467 dev_dbg(dev, "%s()\n", __func__);
468
469 genpd = dev_to_genpd(dev);
470 if (IS_ERR(genpd))
471 return -EINVAL;
472
 473 mutex_lock(&genpd->lock);
474 ret = __pm_genpd_poweron(genpd);
475 if (ret) {
476 mutex_unlock(&genpd->lock);
477 return ret;
478 }
 479 genpd->status = GPD_STATE_BUSY;
480 genpd->resume_count++;
481 for (;;) {
482 prepare_to_wait(&genpd->status_wait_queue, &wait,
483 TASK_UNINTERRUPTIBLE);
484 /*
485 * If current is the powering off task, we have been called
486 * reentrantly from one of the device callbacks, so we should
487 * not wait.
488 */
489 if (!genpd->poweroff_task || genpd->poweroff_task == current)
490 break;
491 mutex_unlock(&genpd->lock);
492
493 schedule();
494
495 mutex_lock(&genpd->lock);
496 }
497 finish_wait(&genpd->status_wait_queue, &wait);
 498 __pm_genpd_runtime_resume(dev, genpd);
499 genpd->resume_count--;
500 genpd_set_active(genpd);
 501 wake_up_all(&genpd->status_wait_queue);
 502 mutex_unlock(&genpd->lock);
503
504 if (genpd->start_device)
505 genpd->start_device(dev);
506
507 return 0;
508}
509
510/**
511 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
512 */
513void pm_genpd_poweroff_unused(void)
514{
515 struct generic_pm_domain *genpd;
516
517 mutex_lock(&gpd_list_lock);
518
519 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
520 genpd_queue_power_off_work(genpd);
521
522 mutex_unlock(&gpd_list_lock);
523}
524
525#else
526
527static inline void genpd_power_off_work_fn(struct work_struct *work) {}
528static inline void __pm_genpd_runtime_resume(struct device *dev,
529 struct generic_pm_domain *genpd) {}
530
531#define pm_genpd_runtime_suspend NULL
532#define pm_genpd_runtime_resume NULL
533
534#endif /* CONFIG_PM_RUNTIME */
535
536#ifdef CONFIG_PM_SLEEP
537
538/**
 539 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
540 * @genpd: PM domain to power off, if possible.
541 *
542 * Check if the given PM domain can be powered off (during system suspend or
 543 * hibernation) and do that if so. Also, in that case propagate to its masters.
544 *
545 * This function is only called in "noirq" stages of system power transitions,
546 * so it need not acquire locks (all of the "noirq" callbacks are executed
547 * sequentially, so it is guaranteed that it will never run twice in parallel).
548 */
549static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
550{
 551 struct gpd_link *link;
 552
 553 if (genpd->status == GPD_STATE_POWER_OFF)
554 return;
555
556 if (genpd->suspended_count != genpd->device_count
557 || atomic_read(&genpd->sd_count) > 0)
558 return;
559
560 if (genpd->power_off)
561 genpd->power_off(genpd);
562
 563 genpd->status = GPD_STATE_POWER_OFF;
564
565 list_for_each_entry(link, &genpd->slave_links, slave_node) {
566 genpd_sd_counter_dec(link->master);
567 pm_genpd_sync_poweroff(link->master);
568 }
569}
570
571/**
572 * resume_needed - Check whether to resume a device before system suspend.
573 * @dev: Device to check.
574 * @genpd: PM domain the device belongs to.
575 *
576 * There are two cases in which a device that can wake up the system from sleep
577 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
578 * to wake up the system and it has to remain active for this purpose while the
579 * system is in the sleep state and (2) if the device is not enabled to wake up
580 * the system from sleep states and it generally doesn't generate wakeup signals
581 * by itself (those signals are generated on its behalf by other parts of the
582 * system). In the latter case it may be necessary to reconfigure the device's
583 * wakeup settings during system suspend, because it may have been set up to
584 * signal remote wakeup from the system's working state as needed by runtime PM.
585 * Return 'true' in either of the above cases.
586 */
587static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
588{
589 bool active_wakeup;
590
591 if (!device_can_wakeup(dev))
592 return false;
593
594 active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
595 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
596}
597
598/**
599 * pm_genpd_prepare - Start power transition of a device in a PM domain.
600 * @dev: Device to start the transition of.
601 *
602 * Start a power transition of a device (during a system-wide power transition)
603 * under the assumption that its pm_domain field points to the domain member of
604 * an object of type struct generic_pm_domain representing a PM domain
605 * consisting of I/O devices.
606 */
607static int pm_genpd_prepare(struct device *dev)
608{
609 struct generic_pm_domain *genpd;
 610 int ret;
611
612 dev_dbg(dev, "%s()\n", __func__);
613
614 genpd = dev_to_genpd(dev);
615 if (IS_ERR(genpd))
616 return -EINVAL;
617
618 /*
619 * If a wakeup request is pending for the device, it should be woken up
620 * at this point and a system wakeup event should be reported if it's
621 * set up to wake up the system from sleep states.
622 */
623 pm_runtime_get_noresume(dev);
624 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
625 pm_wakeup_event(dev, 0);
626
627 if (pm_wakeup_pending()) {
628 pm_runtime_put_sync(dev);
629 return -EBUSY;
630 }
631
632 if (resume_needed(dev, genpd))
633 pm_runtime_resume(dev);
634
 635 genpd_acquire_lock(genpd);
636
637 if (genpd->prepared_count++ == 0)
638 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
639
640 genpd_release_lock(genpd);
641
642 if (genpd->suspend_power_off) {
 643 pm_runtime_put_noidle(dev);
644 return 0;
645 }
646
647 /*
648 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
649 * so pm_genpd_poweron() will return immediately, but if the device
650 * is suspended (e.g. it's been stopped by .stop_device()), we need
651 * to make it operational.
 652 */
 653 pm_runtime_resume(dev);
654 __pm_runtime_disable(dev, false);
655
656 ret = pm_generic_prepare(dev);
657 if (ret) {
658 mutex_lock(&genpd->lock);
659
660 if (--genpd->prepared_count == 0)
661 genpd->suspend_power_off = false;
662
663 mutex_unlock(&genpd->lock);
 664 pm_runtime_enable(dev);
 665 }
666
667 pm_runtime_put_sync(dev);
 668 return ret;
669}
670
671/**
672 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
673 * @dev: Device to suspend.
674 *
675 * Suspend a device under the assumption that its pm_domain field points to the
676 * domain member of an object of type struct generic_pm_domain representing
677 * a PM domain consisting of I/O devices.
678 */
679static int pm_genpd_suspend(struct device *dev)
680{
681 struct generic_pm_domain *genpd;
682
683 dev_dbg(dev, "%s()\n", __func__);
684
685 genpd = dev_to_genpd(dev);
686 if (IS_ERR(genpd))
687 return -EINVAL;
688
689 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
690}
691
692/**
693 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
694 * @dev: Device to suspend.
695 *
696 * Carry out a late suspend of a device under the assumption that its
697 * pm_domain field points to the domain member of an object of type
698 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
699 */
700static int pm_genpd_suspend_noirq(struct device *dev)
701{
702 struct generic_pm_domain *genpd;
703 int ret;
704
705 dev_dbg(dev, "%s()\n", __func__);
706
707 genpd = dev_to_genpd(dev);
708 if (IS_ERR(genpd))
709 return -EINVAL;
710
711 if (genpd->suspend_power_off)
712 return 0;
713
714 ret = pm_generic_suspend_noirq(dev);
715 if (ret)
716 return ret;
717
718 if (device_may_wakeup(dev)
719 && genpd->active_wakeup && genpd->active_wakeup(dev))
720 return 0;
721
722 if (genpd->stop_device)
723 genpd->stop_device(dev);
724
725 /*
726 * Since all of the "noirq" callbacks are executed sequentially, it is
727 * guaranteed that this function will never run twice in parallel for
728 * the same PM domain, so it is not necessary to use locking here.
729 */
730 genpd->suspended_count++;
731 pm_genpd_sync_poweroff(genpd);
732
733 return 0;
734}
735
736/**
737 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
738 * @dev: Device to resume.
739 *
740 * Carry out an early resume of a device under the assumption that its
741 * pm_domain field points to the domain member of an object of type
742 * struct generic_pm_domain representing a power domain consisting of I/O
743 * devices.
744 */
745static int pm_genpd_resume_noirq(struct device *dev)
746{
747 struct generic_pm_domain *genpd;
748
749 dev_dbg(dev, "%s()\n", __func__);
750
751 genpd = dev_to_genpd(dev);
752 if (IS_ERR(genpd))
753 return -EINVAL;
754
755 if (genpd->suspend_power_off)
756 return 0;
757
758 /*
759 * Since all of the "noirq" callbacks are executed sequentially, it is
760 * guaranteed that this function will never run twice in parallel for
761 * the same PM domain, so it is not necessary to use locking here.
762 */
763 pm_genpd_poweron(genpd);
764 genpd->suspended_count--;
765 if (genpd->start_device)
766 genpd->start_device(dev);
767
768 return pm_generic_resume_noirq(dev);
769}
770
771/**
772 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
773 * @dev: Device to resume.
774 *
775 * Resume a device under the assumption that its pm_domain field points to the
776 * domain member of an object of type struct generic_pm_domain representing
777 * a power domain consisting of I/O devices.
778 */
779static int pm_genpd_resume(struct device *dev)
780{
781 struct generic_pm_domain *genpd;
782
783 dev_dbg(dev, "%s()\n", __func__);
784
785 genpd = dev_to_genpd(dev);
786 if (IS_ERR(genpd))
787 return -EINVAL;
788
789 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
790}
791
792/**
793 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
794 * @dev: Device to freeze.
795 *
796 * Freeze a device under the assumption that its pm_domain field points to the
797 * domain member of an object of type struct generic_pm_domain representing
798 * a power domain consisting of I/O devices.
799 */
800static int pm_genpd_freeze(struct device *dev)
801{
802 struct generic_pm_domain *genpd;
803
804 dev_dbg(dev, "%s()\n", __func__);
805
806 genpd = dev_to_genpd(dev);
807 if (IS_ERR(genpd))
808 return -EINVAL;
809
810 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
811}
812
813/**
814 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
815 * @dev: Device to freeze.
816 *
817 * Carry out a late freeze of a device under the assumption that its
818 * pm_domain field points to the domain member of an object of type
819 * struct generic_pm_domain representing a power domain consisting of I/O
820 * devices.
821 */
822static int pm_genpd_freeze_noirq(struct device *dev)
823{
824 struct generic_pm_domain *genpd;
825 int ret;
826
827 dev_dbg(dev, "%s()\n", __func__);
828
829 genpd = dev_to_genpd(dev);
830 if (IS_ERR(genpd))
831 return -EINVAL;
832
833 if (genpd->suspend_power_off)
834 return 0;
835
836 ret = pm_generic_freeze_noirq(dev);
837 if (ret)
838 return ret;
839
840 if (genpd->stop_device)
841 genpd->stop_device(dev);
842
843 return 0;
844}
845
846/**
847 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
848 * @dev: Device to thaw.
849 *
850 * Carry out an early thaw of a device under the assumption that its
851 * pm_domain field points to the domain member of an object of type
852 * struct generic_pm_domain representing a power domain consisting of I/O
853 * devices.
854 */
855static int pm_genpd_thaw_noirq(struct device *dev)
856{
857 struct generic_pm_domain *genpd;
858
859 dev_dbg(dev, "%s()\n", __func__);
860
861 genpd = dev_to_genpd(dev);
862 if (IS_ERR(genpd))
863 return -EINVAL;
864
865 if (genpd->suspend_power_off)
866 return 0;
867
868 if (genpd->start_device)
869 genpd->start_device(dev);
870
871 return pm_generic_thaw_noirq(dev);
872}
873
874/**
875 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
876 * @dev: Device to thaw.
877 *
878 * Thaw a device under the assumption that its pm_domain field points to the
879 * domain member of an object of type struct generic_pm_domain representing
880 * a power domain consisting of I/O devices.
881 */
882static int pm_genpd_thaw(struct device *dev)
883{
884 struct generic_pm_domain *genpd;
885
886 dev_dbg(dev, "%s()\n", __func__);
887
888 genpd = dev_to_genpd(dev);
889 if (IS_ERR(genpd))
890 return -EINVAL;
891
892 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
893}
894
895/**
896 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
897 * @dev: Device to suspend.
898 *
899 * Power off a device under the assumption that its pm_domain field points to
900 * the domain member of an object of type struct generic_pm_domain representing
901 * a PM domain consisting of I/O devices.
902 */
903static int pm_genpd_dev_poweroff(struct device *dev)
904{
905 struct generic_pm_domain *genpd;
906
907 dev_dbg(dev, "%s()\n", __func__);
908
909 genpd = dev_to_genpd(dev);
910 if (IS_ERR(genpd))
911 return -EINVAL;
912
913 return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
914}
915
916/**
917 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
918 * @dev: Device to suspend.
919 *
920 * Carry out a late powering off of a device under the assumption that its
921 * pm_domain field points to the domain member of an object of type
922 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
923 */
924static int pm_genpd_dev_poweroff_noirq(struct device *dev)
925{
926 struct generic_pm_domain *genpd;
927 int ret;
928
929 dev_dbg(dev, "%s()\n", __func__);
930
931 genpd = dev_to_genpd(dev);
932 if (IS_ERR(genpd))
933 return -EINVAL;
934
935 if (genpd->suspend_power_off)
936 return 0;
937
938 ret = pm_generic_poweroff_noirq(dev);
939 if (ret)
940 return ret;
941
942 if (device_may_wakeup(dev)
943 && genpd->active_wakeup && genpd->active_wakeup(dev))
944 return 0;
945
946 if (genpd->stop_device)
947 genpd->stop_device(dev);
948
949 /*
950 * Since all of the "noirq" callbacks are executed sequentially, it is
951 * guaranteed that this function will never run twice in parallel for
952 * the same PM domain, so it is not necessary to use locking here.
953 */
954 genpd->suspended_count++;
955 pm_genpd_sync_poweroff(genpd);
956
957 return 0;
958}
959
960/**
961 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
962 * @dev: Device to resume.
963 *
964 * Carry out an early restore of a device under the assumption that its
965 * pm_domain field points to the domain member of an object of type
966 * struct generic_pm_domain representing a power domain consisting of I/O
967 * devices.
968 */
969static int pm_genpd_restore_noirq(struct device *dev)
970{
971 struct generic_pm_domain *genpd;
972
973 dev_dbg(dev, "%s()\n", __func__);
974
975 genpd = dev_to_genpd(dev);
976 if (IS_ERR(genpd))
977 return -EINVAL;
978
979 /*
980 * Since all of the "noirq" callbacks are executed sequentially, it is
981 * guaranteed that this function will never run twice in parallel for
982 * the same PM domain, so it is not necessary to use locking here.
983 */
 984 genpd->status = GPD_STATE_POWER_OFF;
985 if (genpd->suspend_power_off) {
986 /*
987 * The boot kernel might put the domain into the power on state,
988 * so make sure it really is powered off.
989 */
990 if (genpd->power_off)
991 genpd->power_off(genpd);
992 return 0;
993 }
994
995 pm_genpd_poweron(genpd);
996 genpd->suspended_count--;
997 if (genpd->start_device)
998 genpd->start_device(dev);
999
1000 return pm_generic_restore_noirq(dev);
1001}
1002
1003/**
1004 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
1005 * @dev: Device to resume.
1006 *
1007 * Restore a device under the assumption that its pm_domain field points to the
1008 * domain member of an object of type struct generic_pm_domain representing
1009 * a power domain consisting of I/O devices.
1010 */
1011static int pm_genpd_restore(struct device *dev)
1012{
1013 struct generic_pm_domain *genpd;
1014
1015 dev_dbg(dev, "%s()\n", __func__);
1016
1017 genpd = dev_to_genpd(dev);
1018 if (IS_ERR(genpd))
1019 return -EINVAL;
1020
1021 return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
1022}
1023
1024/**
1025 * pm_genpd_complete - Complete power transition of a device in a power domain.
1026 * @dev: Device to complete the transition of.
1027 *
1028 * Complete a power transition of a device (during a system-wide power
1029 * transition) under the assumption that its pm_domain field points to the
1030 * domain member of an object of type struct generic_pm_domain representing
1031 * a power domain consisting of I/O devices.
1032 */
1033static void pm_genpd_complete(struct device *dev)
1034{
1035 struct generic_pm_domain *genpd;
1036 bool run_complete;
1037
1038 dev_dbg(dev, "%s()\n", __func__);
1039
1040 genpd = dev_to_genpd(dev);
1041 if (IS_ERR(genpd))
1042 return;
1043
1044 mutex_lock(&genpd->lock);
1045
1046 run_complete = !genpd->suspend_power_off;
1047 if (--genpd->prepared_count == 0)
1048 genpd->suspend_power_off = false;
1049
1050 mutex_unlock(&genpd->lock);
1051
1052 if (run_complete) {
1053 pm_generic_complete(dev);
 1054 pm_runtime_set_active(dev);
 1055 pm_runtime_enable(dev);
 1056 pm_runtime_idle(dev);
1057 }
1058}
1059
1060#else
1061
1062#define pm_genpd_prepare NULL
1063#define pm_genpd_suspend NULL
1064#define pm_genpd_suspend_noirq NULL
1065#define pm_genpd_resume_noirq NULL
1066#define pm_genpd_resume NULL
1067#define pm_genpd_freeze NULL
1068#define pm_genpd_freeze_noirq NULL
1069#define pm_genpd_thaw_noirq NULL
1070#define pm_genpd_thaw NULL
1071#define pm_genpd_dev_poweroff_noirq NULL
1072#define pm_genpd_dev_poweroff NULL
1073#define pm_genpd_restore_noirq NULL
1074#define pm_genpd_restore NULL
1075#define pm_genpd_complete NULL
1076
1077#endif /* CONFIG_PM_SLEEP */
1078
1079/**
1080 * pm_genpd_add_device - Add a device to an I/O PM domain.
1081 * @genpd: PM domain to add the device to.
1082 * @dev: Device to be added.
1083 */
1084int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1085{
1086 struct dev_list_entry *dle;
1087 int ret = 0;
1088
1089 dev_dbg(dev, "%s()\n", __func__);
1090
1091 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1092 return -EINVAL;
1093
 1094 genpd_acquire_lock(genpd);
 1095
 1096 if (genpd->status == GPD_STATE_POWER_OFF) {
1097 ret = -EINVAL;
1098 goto out;
1099 }
1100
1101 if (genpd->prepared_count > 0) {
1102 ret = -EAGAIN;
1103 goto out;
1104 }
1105
1106 list_for_each_entry(dle, &genpd->dev_list, node)
1107 if (dle->dev == dev) {
1108 ret = -EINVAL;
1109 goto out;
1110 }
1111
1112 dle = kzalloc(sizeof(*dle), GFP_KERNEL);
1113 if (!dle) {
1114 ret = -ENOMEM;
1115 goto out;
1116 }
1117
1118 dle->dev = dev;
1119 dle->need_restore = false;
1120 list_add_tail(&dle->node, &genpd->dev_list);
 1121 genpd->device_count++;
1122
1123 spin_lock_irq(&dev->power.lock);
1124 dev->pm_domain = &genpd->domain;
1125 spin_unlock_irq(&dev->power.lock);
1126
1127 out:
 1128 genpd_release_lock(genpd);
1129
1130 return ret;
1131}
1132
1133/**
1134 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1135 * @genpd: PM domain to remove the device from.
1136 * @dev: Device to be removed.
1137 */
1138int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1139 struct device *dev)
1140{
1141 struct dev_list_entry *dle;
1142 int ret = -EINVAL;
1143
1144 dev_dbg(dev, "%s()\n", __func__);
1145
1146 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1147 return -EINVAL;
1148
 1149 genpd_acquire_lock(genpd);
 1150
1151 if (genpd->prepared_count > 0) {
1152 ret = -EAGAIN;
1153 goto out;
1154 }
1155
1156 list_for_each_entry(dle, &genpd->dev_list, node) {
1157 if (dle->dev != dev)
1158 continue;
1159
1160 spin_lock_irq(&dev->power.lock);
1161 dev->pm_domain = NULL;
1162 spin_unlock_irq(&dev->power.lock);
1163
 1164 genpd->device_count--;
1165 list_del(&dle->node);
1166 kfree(dle);
1167
1168 ret = 0;
1169 break;
1170 }
1171
 1172 out:
 1173 genpd_release_lock(genpd);
1174
1175 return ret;
1176}
1177
1178/**
1179 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1180 * @genpd: Master PM domain to add the subdomain to.
1181 * @new_subdomain: Subdomain to be added.
1182 */
1183int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1184 struct generic_pm_domain *new_subdomain)
1185{
 1186 struct gpd_link *link;
1187 int ret = 0;
1188
1189 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
1190 return -EINVAL;
1191
1192 start:
1193 genpd_acquire_lock(genpd);
1194 mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
 1195
1196 if (new_subdomain->status != GPD_STATE_POWER_OFF
1197 && new_subdomain->status != GPD_STATE_ACTIVE) {
1198 mutex_unlock(&new_subdomain->lock);
1199 genpd_release_lock(genpd);
1200 goto start;
1201 }
1202
1203 if (genpd->status == GPD_STATE_POWER_OFF
1204 && new_subdomain->status != GPD_STATE_POWER_OFF) {
1205 ret = -EINVAL;
1206 goto out;
1207 }
1208
1209 list_for_each_entry(link, &genpd->slave_links, slave_node) {
1210 if (link->slave == new_subdomain && link->master == genpd) {
1211 ret = -EINVAL;
1212 goto out;
1213 }
1214 }
1215
1216 link = kzalloc(sizeof(*link), GFP_KERNEL);
1217 if (!link) {
1218 ret = -ENOMEM;
1219 goto out;
1220 }
1221 link->master = genpd;
1222 list_add_tail(&link->master_node, &genpd->master_links);
1223 link->slave = new_subdomain;
1224 list_add_tail(&link->slave_node, &new_subdomain->slave_links);
1225 if (new_subdomain->status != GPD_STATE_POWER_OFF)
 1226 genpd_sd_counter_inc(genpd);
 1227
 1228 out:
1229 mutex_unlock(&new_subdomain->lock);
1230 genpd_release_lock(genpd);
1231
1232 return ret;
1233}
1234
1235/**
1236 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1237 * @genpd: Master PM domain to remove the subdomain from.
 1238 * @subdomain: Subdomain to be removed.
1239 */
1240int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 1241 struct generic_pm_domain *subdomain)
 1242{
 1243 struct gpd_link *link;
1244 int ret = -EINVAL;
1245
 1246 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1247 return -EINVAL;
1248
1249 start:
1250 genpd_acquire_lock(genpd);
 1251
1252 list_for_each_entry(link, &genpd->master_links, master_node) {
1253 if (link->slave != subdomain)
1254 continue;
1255
1256 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1257
1258 if (subdomain->status != GPD_STATE_POWER_OFF
1259 && subdomain->status != GPD_STATE_ACTIVE) {
1260 mutex_unlock(&subdomain->lock);
1261 genpd_release_lock(genpd);
1262 goto start;
1263 }
1264
1265 list_del(&link->master_node);
1266 list_del(&link->slave_node);
1267 kfree(link);
 1268 if (subdomain->status != GPD_STATE_POWER_OFF)
1269 genpd_sd_counter_dec(genpd);
1270
1271 mutex_unlock(&subdomain->lock);
1272
1273 ret = 0;
1274 break;
1275 }
1276
 1277 genpd_release_lock(genpd);
1278
1279 return ret;
1280}
1281
1282/**
1283 * pm_genpd_init - Initialize a generic I/O PM domain object.
1284 * @genpd: PM domain object to initialize.
1285 * @gov: PM domain governor to associate with the domain (may be NULL).
1286 * @is_off: Initial value of the domain's power_is_off field.
1287 */
1288void pm_genpd_init(struct generic_pm_domain *genpd,
1289 struct dev_power_governor *gov, bool is_off)
1290{
1291 if (IS_ERR_OR_NULL(genpd))
1292 return;
1293
1294 INIT_LIST_HEAD(&genpd->master_links);
1295 INIT_LIST_HEAD(&genpd->slave_links);
 1296 INIT_LIST_HEAD(&genpd->dev_list);
1297 mutex_init(&genpd->lock);
1298 genpd->gov = gov;
1299 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1300 genpd->in_progress = 0;
 1301 atomic_set(&genpd->sd_count, 0);
1302 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1303 init_waitqueue_head(&genpd->status_wait_queue);
1304 genpd->poweroff_task = NULL;
1305 genpd->resume_count = 0;
1306 genpd->device_count = 0;
1307 genpd->suspended_count = 0;
1308 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1309 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1310 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1311 genpd->domain.ops.prepare = pm_genpd_prepare;
1312 genpd->domain.ops.suspend = pm_genpd_suspend;
1313 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1314 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1315 genpd->domain.ops.resume = pm_genpd_resume;
1316 genpd->domain.ops.freeze = pm_genpd_freeze;
1317 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1318 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1319 genpd->domain.ops.thaw = pm_genpd_thaw;
1320 genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
1321 genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
1322 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1323 genpd->domain.ops.restore = pm_genpd_restore;
1324 genpd->domain.ops.complete = pm_genpd_complete;
1325 mutex_lock(&gpd_list_lock);
1326 list_add(&genpd->gpd_list_node, &gpd_list);
1327 mutex_unlock(&gpd_list_lock);
1328}
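
A minimal usage sketch of the API above (assumed, not taken from the kernel tree): a platform declares a domain with its own power_on/power_off callbacks, initializes it with pm_genpd_init(), and attaches a device with pm_genpd_add_device(); the foo_* names and callback bodies are hypothetical placeholders.

/* Hypothetical platform callbacks -- illustrative only. */
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	/* Enable the power rail/clocks backing this domain (platform-specific). */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	/* Remove power once pm_genpd_poweroff() has suspended every device. */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static void foo_setup_pm_domain(struct device *dev)
{
	/*
	 * Start in the active state: pm_genpd_add_device() above returns
	 * -EINVAL while the domain is in GPD_STATE_POWER_OFF.
	 */
	pm_genpd_init(&foo_pd, NULL, false);
	pm_genpd_add_device(&foo_pd, dev);
}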