PM / Domains: Make device removal more straightforward
[linux-2.6-block.git] / drivers / base / power / domain.c
CommitLineData
f721889f
RW
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
17b75eca
RW
16#include <linux/sched.h>
17#include <linux/suspend.h>
d5e4cbfe
RW
18#include <linux/export.h>
19
20#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
21({ \
22 type (*__routine)(struct device *__d); \
23 type __ret = (type)0; \
24 \
25 __routine = genpd->dev_ops.callback; \
26 if (__routine) { \
27 __ret = __routine(dev); \
28 } else { \
29 __routine = dev_gpd_data(dev)->ops.callback; \
30 if (__routine) \
31 __ret = __routine(dev); \
32 } \
33 __ret; \
34})
f721889f 35
0140d8bd
RW
36#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
37({ \
38 ktime_t __start = ktime_get(); \
39 type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
40 s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
41 struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \
42 if (__elapsed > __gpd_data->td.field) { \
43 __gpd_data->td.field = __elapsed; \
44 dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
45 __elapsed); \
46 } \
47 __retval; \
48})
49
5125bbf3
RW
50static LIST_HEAD(gpd_list);
51static DEFINE_MUTEX(gpd_list_lock);
52
5248051b
RW
53#ifdef CONFIG_PM
54
b02c999a 55struct generic_pm_domain *dev_to_genpd(struct device *dev)
5248051b
RW
56{
57 if (IS_ERR_OR_NULL(dev->pm_domain))
58 return ERR_PTR(-EINVAL);
59
596ba34b 60 return pd_to_genpd(dev->pm_domain);
5248051b 61}
f721889f 62
d5e4cbfe
RW
63static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
64{
0140d8bd
RW
65 return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
66 stop_latency_ns, "stop");
d5e4cbfe
RW
67}
68
69static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
70{
0140d8bd
RW
71 return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
72 start_latency_ns, "start");
d5e4cbfe
RW
73}
74
ecf00475
RW
75static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
76{
0140d8bd
RW
77 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
78 save_state_latency_ns, "state save");
ecf00475
RW
79}
80
81static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
82{
0140d8bd
RW
83 return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
84 restore_state_latency_ns,
85 "state restore");
ecf00475
RW
86}
87
c4bb3160 88static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
f721889f 89{
c4bb3160
RW
90 bool ret = false;
91
92 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
93 ret = !!atomic_dec_and_test(&genpd->sd_count);
94
95 return ret;
96}
97
98static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
99{
100 atomic_inc(&genpd->sd_count);
101 smp_mb__after_atomic_inc();
f721889f
RW
102}
103
17b75eca
RW
104static void genpd_acquire_lock(struct generic_pm_domain *genpd)
105{
106 DEFINE_WAIT(wait);
107
108 mutex_lock(&genpd->lock);
109 /*
110 * Wait for the domain to transition into either the active,
111 * or the power off state.
112 */
113 for (;;) {
114 prepare_to_wait(&genpd->status_wait_queue, &wait,
115 TASK_UNINTERRUPTIBLE);
c6d22b37
RW
116 if (genpd->status == GPD_STATE_ACTIVE
117 || genpd->status == GPD_STATE_POWER_OFF)
17b75eca
RW
118 break;
119 mutex_unlock(&genpd->lock);
120
121 schedule();
122
123 mutex_lock(&genpd->lock);
124 }
125 finish_wait(&genpd->status_wait_queue, &wait);
126}
127
128static void genpd_release_lock(struct generic_pm_domain *genpd)
129{
130 mutex_unlock(&genpd->lock);
131}
132
c6d22b37
RW
133static void genpd_set_active(struct generic_pm_domain *genpd)
134{
135 if (genpd->resume_count == 0)
136 genpd->status = GPD_STATE_ACTIVE;
137}
138
5248051b 139/**
5063ce15 140 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
5248051b
RW
141 * @genpd: PM domain to power up.
142 *
5063ce15 143 * Restore power to @genpd and all of its masters so that it is possible to
5248051b
RW
144 * resume a device belonging to it.
145 */
3f241775
RW
146int __pm_genpd_poweron(struct generic_pm_domain *genpd)
147 __releases(&genpd->lock) __acquires(&genpd->lock)
5248051b 148{
5063ce15 149 struct gpd_link *link;
3f241775 150 DEFINE_WAIT(wait);
5248051b
RW
151 int ret = 0;
152
5063ce15 153 /* If the domain's master is being waited for, we have to wait too. */
3f241775
RW
154 for (;;) {
155 prepare_to_wait(&genpd->status_wait_queue, &wait,
156 TASK_UNINTERRUPTIBLE);
17877eb5 157 if (genpd->status != GPD_STATE_WAIT_MASTER)
3f241775
RW
158 break;
159 mutex_unlock(&genpd->lock);
17b75eca 160
3f241775
RW
161 schedule();
162
163 mutex_lock(&genpd->lock);
164 }
165 finish_wait(&genpd->status_wait_queue, &wait);
9e08cf42 166
17b75eca 167 if (genpd->status == GPD_STATE_ACTIVE
596ba34b 168 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
3f241775 169 return 0;
5248051b 170
c6d22b37
RW
171 if (genpd->status != GPD_STATE_POWER_OFF) {
172 genpd_set_active(genpd);
3f241775 173 return 0;
c6d22b37
RW
174 }
175
5063ce15
RW
176 /*
177 * The list is guaranteed not to change while the loop below is being
178 * executed, unless one of the masters' .power_on() callbacks fiddles
179 * with it.
180 */
181 list_for_each_entry(link, &genpd->slave_links, slave_node) {
182 genpd_sd_counter_inc(link->master);
17877eb5 183 genpd->status = GPD_STATE_WAIT_MASTER;
3c07cbc4 184
5248051b 185 mutex_unlock(&genpd->lock);
5248051b 186
5063ce15 187 ret = pm_genpd_poweron(link->master);
9e08cf42
RW
188
189 mutex_lock(&genpd->lock);
190
3f241775
RW
191 /*
192 * The "wait for master" status is guaranteed not to change
5063ce15 193 * while the master is powering on.
3f241775
RW
194 */
195 genpd->status = GPD_STATE_POWER_OFF;
196 wake_up_all(&genpd->status_wait_queue);
5063ce15
RW
197 if (ret) {
198 genpd_sd_counter_dec(link->master);
9e08cf42 199 goto err;
5063ce15 200 }
5248051b
RW
201 }
202
9e08cf42 203 if (genpd->power_on) {
0140d8bd
RW
204 ktime_t time_start = ktime_get();
205 s64 elapsed_ns;
206
fe202fde 207 ret = genpd->power_on(genpd);
9e08cf42
RW
208 if (ret)
209 goto err;
0140d8bd
RW
210
211 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
e84b2c20 212 if (elapsed_ns > genpd->power_on_latency_ns) {
0140d8bd 213 genpd->power_on_latency_ns = elapsed_ns;
e84b2c20
RW
214 if (genpd->name)
215 pr_warning("%s: Power-on latency exceeded, "
216 "new value %lld ns\n", genpd->name,
217 elapsed_ns);
218 }
3c07cbc4 219 }
5248051b 220
9e08cf42
RW
221 genpd_set_active(genpd);
222
3f241775 223 return 0;
9e08cf42
RW
224
225 err:
5063ce15
RW
226 list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
227 genpd_sd_counter_dec(link->master);
9e08cf42 228
3f241775
RW
229 return ret;
230}
231
232/**
5063ce15 233 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
3f241775
RW
234 * @genpd: PM domain to power up.
235 */
236int pm_genpd_poweron(struct generic_pm_domain *genpd)
237{
238 int ret;
239
240 mutex_lock(&genpd->lock);
241 ret = __pm_genpd_poweron(genpd);
242 mutex_unlock(&genpd->lock);
243 return ret;
5248051b
RW
244}
245
246#endif /* CONFIG_PM */
247
248#ifdef CONFIG_PM_RUNTIME
249
f721889f
RW
250/**
251 * __pm_genpd_save_device - Save the pre-suspend state of a device.
4605ab65 252 * @pdd: Domain data of the device to save the state of.
f721889f
RW
253 * @genpd: PM domain the device belongs to.
254 */
4605ab65 255static int __pm_genpd_save_device(struct pm_domain_data *pdd,
f721889f 256 struct generic_pm_domain *genpd)
17b75eca 257 __releases(&genpd->lock) __acquires(&genpd->lock)
f721889f 258{
cd0ea672 259 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
4605ab65 260 struct device *dev = pdd->dev;
f721889f
RW
261 int ret = 0;
262
cd0ea672 263 if (gpd_data->need_restore)
f721889f
RW
264 return 0;
265
17b75eca
RW
266 mutex_unlock(&genpd->lock);
267
ecf00475
RW
268 genpd_start_dev(genpd, dev);
269 ret = genpd_save_dev(genpd, dev);
270 genpd_stop_dev(genpd, dev);
f721889f 271
17b75eca
RW
272 mutex_lock(&genpd->lock);
273
f721889f 274 if (!ret)
cd0ea672 275 gpd_data->need_restore = true;
f721889f
RW
276
277 return ret;
278}
279
280/**
281 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
4605ab65 282 * @pdd: Domain data of the device to restore the state of.
f721889f
RW
283 * @genpd: PM domain the device belongs to.
284 */
4605ab65 285static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
f721889f 286 struct generic_pm_domain *genpd)
17b75eca 287 __releases(&genpd->lock) __acquires(&genpd->lock)
f721889f 288{
cd0ea672 289 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
4605ab65 290 struct device *dev = pdd->dev;
f721889f 291
cd0ea672 292 if (!gpd_data->need_restore)
f721889f
RW
293 return;
294
17b75eca
RW
295 mutex_unlock(&genpd->lock);
296
ecf00475
RW
297 genpd_start_dev(genpd, dev);
298 genpd_restore_dev(genpd, dev);
299 genpd_stop_dev(genpd, dev);
f721889f 300
17b75eca
RW
301 mutex_lock(&genpd->lock);
302
cd0ea672 303 gpd_data->need_restore = false;
f721889f
RW
304}
305
c6d22b37
RW
306/**
307 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
308 * @genpd: PM domain to check.
309 *
310 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
311 * a "power off" operation, which means that a "power on" has occured in the
312 * meantime, or if its resume_count field is different from zero, which means
313 * that one of its devices has been resumed in the meantime.
314 */
315static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
316{
17877eb5 317 return genpd->status == GPD_STATE_WAIT_MASTER
3f241775 318 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
c6d22b37
RW
319}
320
56375fd4
RW
321/**
322 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
323 * @genpd: PM domain to power off.
324 *
325 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
326 * before.
327 */
0bc5b2de 328void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
56375fd4
RW
329{
330 if (!work_pending(&genpd->power_off_work))
331 queue_work(pm_wq, &genpd->power_off_work);
332}
333
f721889f
RW
334/**
335 * pm_genpd_poweroff - Remove power from a given PM domain.
336 * @genpd: PM domain to power down.
337 *
338 * If all of the @genpd's devices have been suspended and all of its subdomains
339 * have been powered down, run the runtime suspend callbacks provided by all of
340 * the @genpd's devices' drivers and remove power from @genpd.
341 */
342static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
17b75eca 343 __releases(&genpd->lock) __acquires(&genpd->lock)
f721889f 344{
4605ab65 345 struct pm_domain_data *pdd;
5063ce15 346 struct gpd_link *link;
f721889f 347 unsigned int not_suspended;
c6d22b37 348 int ret = 0;
f721889f 349
c6d22b37
RW
350 start:
351 /*
352 * Do not try to power off the domain in the following situations:
353 * (1) The domain is already in the "power off" state.
5063ce15 354 * (2) The domain is waiting for its master to power up.
c6d22b37 355 * (3) One of the domain's devices is being resumed right now.
3f241775 356 * (4) System suspend is in progress.
c6d22b37 357 */
3f241775 358 if (genpd->status == GPD_STATE_POWER_OFF
17877eb5 359 || genpd->status == GPD_STATE_WAIT_MASTER
3f241775 360 || genpd->resume_count > 0 || genpd->prepared_count > 0)
f721889f
RW
361 return 0;
362
c4bb3160 363 if (atomic_read(&genpd->sd_count) > 0)
f721889f
RW
364 return -EBUSY;
365
366 not_suspended = 0;
4605ab65 367 list_for_each_entry(pdd, &genpd->dev_list, list_node)
0aa2a221 368 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
1e78a0c7 369 || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
f721889f
RW
370 not_suspended++;
371
372 if (not_suspended > genpd->in_progress)
373 return -EBUSY;
374
c6d22b37
RW
375 if (genpd->poweroff_task) {
376 /*
377 * Another instance of pm_genpd_poweroff() is executing
378 * callbacks, so tell it to start over and return.
379 */
380 genpd->status = GPD_STATE_REPEAT;
381 return 0;
382 }
383
dd8683e9 384 genpd->max_off_time_ns = -1;
f721889f
RW
385 if (genpd->gov && genpd->gov->power_down_ok) {
386 if (!genpd->gov->power_down_ok(&genpd->domain))
387 return -EAGAIN;
388 }
389
17b75eca 390 genpd->status = GPD_STATE_BUSY;
c6d22b37 391 genpd->poweroff_task = current;
17b75eca 392
4605ab65 393 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
3c07cbc4 394 ret = atomic_read(&genpd->sd_count) == 0 ?
4605ab65 395 __pm_genpd_save_device(pdd, genpd) : -EBUSY;
3f241775
RW
396
397 if (genpd_abort_poweroff(genpd))
398 goto out;
399
697a7f37
RW
400 if (ret) {
401 genpd_set_active(genpd);
402 goto out;
403 }
f721889f 404
c6d22b37
RW
405 if (genpd->status == GPD_STATE_REPEAT) {
406 genpd->poweroff_task = NULL;
407 goto start;
408 }
409 }
17b75eca 410
3c07cbc4 411 if (genpd->power_off) {
0140d8bd
RW
412 ktime_t time_start;
413 s64 elapsed_ns;
414
3c07cbc4
RW
415 if (atomic_read(&genpd->sd_count) > 0) {
416 ret = -EBUSY;
c6d22b37
RW
417 goto out;
418 }
17b75eca 419
0140d8bd
RW
420 time_start = ktime_get();
421
3c07cbc4 422 /*
5063ce15
RW
423 * If sd_count > 0 at this point, one of the subdomains hasn't
424 * managed to call pm_genpd_poweron() for the master yet after
3c07cbc4
RW
425 * incrementing it. In that case pm_genpd_poweron() will wait
426 * for us to drop the lock, so we can call .power_off() and let
427 * the pm_genpd_poweron() restore power for us (this shouldn't
428 * happen very often).
429 */
d2805402
RW
430 ret = genpd->power_off(genpd);
431 if (ret == -EBUSY) {
432 genpd_set_active(genpd);
d2805402
RW
433 goto out;
434 }
0140d8bd
RW
435
436 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
e84b2c20 437 if (elapsed_ns > genpd->power_off_latency_ns) {
0140d8bd 438 genpd->power_off_latency_ns = elapsed_ns;
e84b2c20
RW
439 if (genpd->name)
440 pr_warning("%s: Power-off latency exceeded, "
441 "new value %lld ns\n", genpd->name,
442 elapsed_ns);
443 }
d2805402 444 }
f721889f 445
17b75eca 446 genpd->status = GPD_STATE_POWER_OFF;
221e9b58 447
5063ce15
RW
448 list_for_each_entry(link, &genpd->slave_links, slave_node) {
449 genpd_sd_counter_dec(link->master);
450 genpd_queue_power_off_work(link->master);
451 }
f721889f 452
c6d22b37
RW
453 out:
454 genpd->poweroff_task = NULL;
455 wake_up_all(&genpd->status_wait_queue);
456 return ret;
f721889f
RW
457}
458
459/**
460 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
461 * @work: Work structure used for scheduling the execution of this function.
462 */
463static void genpd_power_off_work_fn(struct work_struct *work)
464{
465 struct generic_pm_domain *genpd;
466
467 genpd = container_of(work, struct generic_pm_domain, power_off_work);
468
17b75eca 469 genpd_acquire_lock(genpd);
f721889f 470 pm_genpd_poweroff(genpd);
17b75eca 471 genpd_release_lock(genpd);
f721889f
RW
472}
473
474/**
475 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
476 * @dev: Device to suspend.
477 *
478 * Carry out a runtime suspend of a device under the assumption that its
479 * pm_domain field points to the domain member of an object of type
480 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
481 */
482static int pm_genpd_runtime_suspend(struct device *dev)
483{
484 struct generic_pm_domain *genpd;
b02c999a 485 bool (*stop_ok)(struct device *__dev);
d5e4cbfe 486 int ret;
f721889f
RW
487
488 dev_dbg(dev, "%s()\n", __func__);
489
5248051b
RW
490 genpd = dev_to_genpd(dev);
491 if (IS_ERR(genpd))
f721889f
RW
492 return -EINVAL;
493
0aa2a221
RW
494 might_sleep_if(!genpd->dev_irq_safe);
495
1e78a0c7
RW
496 if (dev_gpd_data(dev)->always_on)
497 return -EBUSY;
498
a5bef810 499 dev_gpd_data(dev)->td.effective_constraint_ns = -1;
b02c999a
RW
500 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
501 if (stop_ok && !stop_ok(dev))
502 return -EBUSY;
503
d5e4cbfe
RW
504 ret = genpd_stop_dev(genpd, dev);
505 if (ret)
506 return ret;
17b75eca 507
0aa2a221
RW
508 /*
509 * If power.irq_safe is set, this routine will be run with interrupts
510 * off, so it can't use mutexes.
511 */
512 if (dev->power.irq_safe)
513 return 0;
514
c6d22b37 515 mutex_lock(&genpd->lock);
f721889f
RW
516 genpd->in_progress++;
517 pm_genpd_poweroff(genpd);
518 genpd->in_progress--;
c6d22b37 519 mutex_unlock(&genpd->lock);
f721889f
RW
520
521 return 0;
522}
523
f721889f
RW
524/**
525 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
526 * @dev: Device to resume.
527 *
528 * Carry out a runtime resume of a device under the assumption that its
529 * pm_domain field points to the domain member of an object of type
530 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
531 */
532static int pm_genpd_runtime_resume(struct device *dev)
533{
534 struct generic_pm_domain *genpd;
c6d22b37 535 DEFINE_WAIT(wait);
f721889f
RW
536 int ret;
537
538 dev_dbg(dev, "%s()\n", __func__);
539
5248051b
RW
540 genpd = dev_to_genpd(dev);
541 if (IS_ERR(genpd))
f721889f
RW
542 return -EINVAL;
543
0aa2a221
RW
544 might_sleep_if(!genpd->dev_irq_safe);
545
546 /* If power.irq_safe, the PM domain is never powered off. */
547 if (dev->power.irq_safe)
548 goto out;
549
c6d22b37 550 mutex_lock(&genpd->lock);
3f241775
RW
551 ret = __pm_genpd_poweron(genpd);
552 if (ret) {
553 mutex_unlock(&genpd->lock);
554 return ret;
555 }
17b75eca 556 genpd->status = GPD_STATE_BUSY;
c6d22b37
RW
557 genpd->resume_count++;
558 for (;;) {
559 prepare_to_wait(&genpd->status_wait_queue, &wait,
560 TASK_UNINTERRUPTIBLE);
561 /*
562 * If current is the powering off task, we have been called
563 * reentrantly from one of the device callbacks, so we should
564 * not wait.
565 */
566 if (!genpd->poweroff_task || genpd->poweroff_task == current)
567 break;
568 mutex_unlock(&genpd->lock);
569
570 schedule();
571
572 mutex_lock(&genpd->lock);
573 }
574 finish_wait(&genpd->status_wait_queue, &wait);
cd0ea672 575 __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
c6d22b37
RW
576 genpd->resume_count--;
577 genpd_set_active(genpd);
17b75eca 578 wake_up_all(&genpd->status_wait_queue);
c6d22b37 579 mutex_unlock(&genpd->lock);
17b75eca 580
0aa2a221 581 out:
d5e4cbfe 582 genpd_start_dev(genpd, dev);
f721889f
RW
583
584 return 0;
585}
586
17f2ae7f
RW
587/**
588 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
589 */
590void pm_genpd_poweroff_unused(void)
591{
592 struct generic_pm_domain *genpd;
593
594 mutex_lock(&gpd_list_lock);
595
596 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
597 genpd_queue_power_off_work(genpd);
598
599 mutex_unlock(&gpd_list_lock);
600}
601
f721889f
RW
602#else
603
604static inline void genpd_power_off_work_fn(struct work_struct *work) {}
605
606#define pm_genpd_runtime_suspend NULL
607#define pm_genpd_runtime_resume NULL
608
609#endif /* CONFIG_PM_RUNTIME */
610
596ba34b
RW
611#ifdef CONFIG_PM_SLEEP
612
d5e4cbfe
RW
613static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
614 struct device *dev)
615{
616 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
617}
618
d23b9b00
RW
619static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
620{
621 return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
622}
623
624static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
625{
626 return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
627}
628
629static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
630{
631 return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
632}
633
634static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
635{
636 return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
637}
638
639static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
640{
641 return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
642}
643
644static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
645{
646 return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
647}
648
649static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
650{
651 return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
652}
653
654static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
655{
656 return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
657}
658
596ba34b 659/**
5063ce15 660 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
596ba34b
RW
661 * @genpd: PM domain to power off, if possible.
662 *
663 * Check if the given PM domain can be powered off (during system suspend or
5063ce15 664 * hibernation) and do that if so. Also, in that case propagate to its masters.
596ba34b
RW
665 *
666 * This function is only called in "noirq" stages of system power transitions,
667 * so it need not acquire locks (all of the "noirq" callbacks are executed
668 * sequentially, so it is guaranteed that it will never run twice in parallel).
669 */
670static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
671{
5063ce15 672 struct gpd_link *link;
596ba34b 673
17b75eca 674 if (genpd->status == GPD_STATE_POWER_OFF)
596ba34b
RW
675 return;
676
c4bb3160
RW
677 if (genpd->suspended_count != genpd->device_count
678 || atomic_read(&genpd->sd_count) > 0)
596ba34b
RW
679 return;
680
681 if (genpd->power_off)
682 genpd->power_off(genpd);
683
17b75eca 684 genpd->status = GPD_STATE_POWER_OFF;
5063ce15
RW
685
686 list_for_each_entry(link, &genpd->slave_links, slave_node) {
687 genpd_sd_counter_dec(link->master);
688 pm_genpd_sync_poweroff(link->master);
596ba34b
RW
689 }
690}
691
4ecd6e65
RW
692/**
693 * resume_needed - Check whether to resume a device before system suspend.
694 * @dev: Device to check.
695 * @genpd: PM domain the device belongs to.
696 *
697 * There are two cases in which a device that can wake up the system from sleep
698 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
699 * to wake up the system and it has to remain active for this purpose while the
700 * system is in the sleep state and (2) if the device is not enabled to wake up
701 * the system from sleep states and it generally doesn't generate wakeup signals
702 * by itself (those signals are generated on its behalf by other parts of the
703 * system). In the latter case it may be necessary to reconfigure the device's
704 * wakeup settings during system suspend, because it may have been set up to
705 * signal remote wakeup from the system's working state as needed by runtime PM.
706 * Return 'true' in either of the above cases.
707 */
708static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
709{
710 bool active_wakeup;
711
712 if (!device_can_wakeup(dev))
713 return false;
714
d5e4cbfe 715 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
4ecd6e65
RW
716 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
717}
718
596ba34b
RW
719/**
720 * pm_genpd_prepare - Start power transition of a device in a PM domain.
721 * @dev: Device to start the transition of.
722 *
723 * Start a power transition of a device (during a system-wide power transition)
724 * under the assumption that its pm_domain field points to the domain member of
725 * an object of type struct generic_pm_domain representing a PM domain
726 * consisting of I/O devices.
727 */
728static int pm_genpd_prepare(struct device *dev)
729{
730 struct generic_pm_domain *genpd;
b6c10c84 731 int ret;
596ba34b
RW
732
733 dev_dbg(dev, "%s()\n", __func__);
734
735 genpd = dev_to_genpd(dev);
736 if (IS_ERR(genpd))
737 return -EINVAL;
738
17b75eca
RW
739 /*
740 * If a wakeup request is pending for the device, it should be woken up
741 * at this point and a system wakeup event should be reported if it's
742 * set up to wake up the system from sleep states.
743 */
744 pm_runtime_get_noresume(dev);
745 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
746 pm_wakeup_event(dev, 0);
747
748 if (pm_wakeup_pending()) {
749 pm_runtime_put_sync(dev);
750 return -EBUSY;
751 }
752
4ecd6e65
RW
753 if (resume_needed(dev, genpd))
754 pm_runtime_resume(dev);
755
17b75eca 756 genpd_acquire_lock(genpd);
596ba34b 757
65533bbf
RW
758 if (genpd->prepared_count++ == 0) {
759 genpd->suspended_count = 0;
17b75eca 760 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
65533bbf 761 }
17b75eca
RW
762
763 genpd_release_lock(genpd);
596ba34b
RW
764
765 if (genpd->suspend_power_off) {
17b75eca 766 pm_runtime_put_noidle(dev);
596ba34b
RW
767 return 0;
768 }
769
770 /*
17b75eca
RW
771 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
772 * so pm_genpd_poweron() will return immediately, but if the device
d5e4cbfe 773 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
17b75eca 774 * to make it operational.
596ba34b 775 */
17b75eca 776 pm_runtime_resume(dev);
596ba34b
RW
777 __pm_runtime_disable(dev, false);
778
b6c10c84
RW
779 ret = pm_generic_prepare(dev);
780 if (ret) {
781 mutex_lock(&genpd->lock);
782
783 if (--genpd->prepared_count == 0)
784 genpd->suspend_power_off = false;
785
786 mutex_unlock(&genpd->lock);
17b75eca 787 pm_runtime_enable(dev);
b6c10c84 788 }
17b75eca
RW
789
790 pm_runtime_put_sync(dev);
b6c10c84 791 return ret;
596ba34b
RW
792}
793
794/**
795 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
796 * @dev: Device to suspend.
797 *
798 * Suspend a device under the assumption that its pm_domain field points to the
799 * domain member of an object of type struct generic_pm_domain representing
800 * a PM domain consisting of I/O devices.
801 */
802static int pm_genpd_suspend(struct device *dev)
803{
804 struct generic_pm_domain *genpd;
805
806 dev_dbg(dev, "%s()\n", __func__);
807
808 genpd = dev_to_genpd(dev);
809 if (IS_ERR(genpd))
810 return -EINVAL;
811
d23b9b00 812 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
596ba34b
RW
813}
814
815/**
0496c8ae 816 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
596ba34b
RW
817 * @dev: Device to suspend.
818 *
819 * Carry out a late suspend of a device under the assumption that its
820 * pm_domain field points to the domain member of an object of type
821 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
822 */
0496c8ae 823static int pm_genpd_suspend_late(struct device *dev)
596ba34b
RW
824{
825 struct generic_pm_domain *genpd;
596ba34b
RW
826
827 dev_dbg(dev, "%s()\n", __func__);
828
829 genpd = dev_to_genpd(dev);
830 if (IS_ERR(genpd))
831 return -EINVAL;
832
0496c8ae
RW
833 return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
834}
596ba34b 835
0496c8ae
RW
836/**
837 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
838 * @dev: Device to suspend.
839 *
840 * Stop the device and remove power from the domain if all devices in it have
841 * been stopped.
842 */
843static int pm_genpd_suspend_noirq(struct device *dev)
844{
845 struct generic_pm_domain *genpd;
846
847 dev_dbg(dev, "%s()\n", __func__);
848
849 genpd = dev_to_genpd(dev);
850 if (IS_ERR(genpd))
851 return -EINVAL;
596ba34b 852
1e78a0c7 853 if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
0496c8ae 854 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
d4f2d87a
RW
855 return 0;
856
d5e4cbfe 857 genpd_stop_dev(genpd, dev);
596ba34b
RW
858
859 /*
860 * Since all of the "noirq" callbacks are executed sequentially, it is
861 * guaranteed that this function will never run twice in parallel for
862 * the same PM domain, so it is not necessary to use locking here.
863 */
864 genpd->suspended_count++;
865 pm_genpd_sync_poweroff(genpd);
866
867 return 0;
868}
869
870/**
0496c8ae 871 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
596ba34b
RW
872 * @dev: Device to resume.
873 *
0496c8ae 874 * Restore power to the device's PM domain, if necessary, and start the device.
596ba34b
RW
875 */
876static int pm_genpd_resume_noirq(struct device *dev)
877{
878 struct generic_pm_domain *genpd;
879
880 dev_dbg(dev, "%s()\n", __func__);
881
882 genpd = dev_to_genpd(dev);
883 if (IS_ERR(genpd))
884 return -EINVAL;
885
1e78a0c7 886 if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
cc85b207 887 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
596ba34b
RW
888 return 0;
889
890 /*
891 * Since all of the "noirq" callbacks are executed sequentially, it is
892 * guaranteed that this function will never run twice in parallel for
893 * the same PM domain, so it is not necessary to use locking here.
894 */
895 pm_genpd_poweron(genpd);
896 genpd->suspended_count--;
596ba34b 897
0496c8ae 898 return genpd_start_dev(genpd, dev);
596ba34b
RW
899}
900
901/**
0496c8ae
RW
902 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
903 * @dev: Device to resume.
904 *
905 * Carry out an early resume of a device under the assumption that its
906 * pm_domain field points to the domain member of an object of type
907 * struct generic_pm_domain representing a power domain consisting of I/O
908 * devices.
909 */
910static int pm_genpd_resume_early(struct device *dev)
911{
912 struct generic_pm_domain *genpd;
913
914 dev_dbg(dev, "%s()\n", __func__);
915
916 genpd = dev_to_genpd(dev);
917 if (IS_ERR(genpd))
918 return -EINVAL;
919
920 return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
921}
922
923/**
924 * pm_genpd_resume - Resume of device in an I/O PM domain.
596ba34b
RW
925 * @dev: Device to resume.
926 *
927 * Resume a device under the assumption that its pm_domain field points to the
928 * domain member of an object of type struct generic_pm_domain representing
929 * a power domain consisting of I/O devices.
930 */
931static int pm_genpd_resume(struct device *dev)
932{
933 struct generic_pm_domain *genpd;
934
935 dev_dbg(dev, "%s()\n", __func__);
936
937 genpd = dev_to_genpd(dev);
938 if (IS_ERR(genpd))
939 return -EINVAL;
940
d23b9b00 941 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
596ba34b
RW
942}
943
944/**
0496c8ae 945 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
596ba34b
RW
946 * @dev: Device to freeze.
947 *
948 * Freeze a device under the assumption that its pm_domain field points to the
949 * domain member of an object of type struct generic_pm_domain representing
950 * a power domain consisting of I/O devices.
951 */
952static int pm_genpd_freeze(struct device *dev)
953{
954 struct generic_pm_domain *genpd;
955
956 dev_dbg(dev, "%s()\n", __func__);
957
958 genpd = dev_to_genpd(dev);
959 if (IS_ERR(genpd))
960 return -EINVAL;
961
d23b9b00 962 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
596ba34b
RW
963}
964
965/**
0496c8ae
RW
966 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
967 * @dev: Device to freeze.
968 *
969 * Carry out a late freeze of a device under the assumption that its
970 * pm_domain field points to the domain member of an object of type
971 * struct generic_pm_domain representing a power domain consisting of I/O
972 * devices.
973 */
974static int pm_genpd_freeze_late(struct device *dev)
975{
976 struct generic_pm_domain *genpd;
977
978 dev_dbg(dev, "%s()\n", __func__);
979
980 genpd = dev_to_genpd(dev);
981 if (IS_ERR(genpd))
982 return -EINVAL;
983
984 return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
985}
986
987/**
988 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
596ba34b
RW
989 * @dev: Device to freeze.
990 *
991 * Carry out a late freeze of a device under the assumption that its
992 * pm_domain field points to the domain member of an object of type
993 * struct generic_pm_domain representing a power domain consisting of I/O
994 * devices.
995 */
996static int pm_genpd_freeze_noirq(struct device *dev)
997{
998 struct generic_pm_domain *genpd;
596ba34b
RW
999
1000 dev_dbg(dev, "%s()\n", __func__);
1001
1002 genpd = dev_to_genpd(dev);
1003 if (IS_ERR(genpd))
1004 return -EINVAL;
1005
1e78a0c7
RW
1006 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1007 0 : genpd_stop_dev(genpd, dev);
0496c8ae 1008}
596ba34b 1009
0496c8ae
RW
1010/**
1011 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1012 * @dev: Device to thaw.
1013 *
1014 * Start the device, unless power has been removed from the domain already
1015 * before the system transition.
1016 */
1017static int pm_genpd_thaw_noirq(struct device *dev)
1018{
1019 struct generic_pm_domain *genpd;
596ba34b 1020
0496c8ae 1021 dev_dbg(dev, "%s()\n", __func__);
596ba34b 1022
0496c8ae
RW
1023 genpd = dev_to_genpd(dev);
1024 if (IS_ERR(genpd))
1025 return -EINVAL;
1026
1e78a0c7
RW
1027 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1028 0 : genpd_start_dev(genpd, dev);
596ba34b
RW
1029}
1030
1031/**
0496c8ae 1032 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
596ba34b
RW
1033 * @dev: Device to thaw.
1034 *
1035 * Carry out an early thaw of a device under the assumption that its
1036 * pm_domain field points to the domain member of an object of type
1037 * struct generic_pm_domain representing a power domain consisting of I/O
1038 * devices.
1039 */
0496c8ae 1040static int pm_genpd_thaw_early(struct device *dev)
596ba34b
RW
1041{
1042 struct generic_pm_domain *genpd;
1043
1044 dev_dbg(dev, "%s()\n", __func__);
1045
1046 genpd = dev_to_genpd(dev);
1047 if (IS_ERR(genpd))
1048 return -EINVAL;
1049
0496c8ae 1050 return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
596ba34b
RW
1051}
1052
1053/**
1054 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1055 * @dev: Device to thaw.
1056 *
1057 * Thaw a device under the assumption that its pm_domain field points to the
1058 * domain member of an object of type struct generic_pm_domain representing
1059 * a power domain consisting of I/O devices.
1060 */
1061static int pm_genpd_thaw(struct device *dev)
1062{
1063 struct generic_pm_domain *genpd;
1064
1065 dev_dbg(dev, "%s()\n", __func__);
1066
1067 genpd = dev_to_genpd(dev);
1068 if (IS_ERR(genpd))
1069 return -EINVAL;
1070
d23b9b00 1071 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
596ba34b
RW
1072}
1073
1074/**
0496c8ae 1075 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
596ba34b
RW
1076 * @dev: Device to resume.
1077 *
0496c8ae
RW
1078 * Make sure the domain will be in the same power state as before the
1079 * hibernation the system is resuming from and start the device if necessary.
596ba34b
RW
1080 */
1081static int pm_genpd_restore_noirq(struct device *dev)
1082{
1083 struct generic_pm_domain *genpd;
1084
1085 dev_dbg(dev, "%s()\n", __func__);
1086
1087 genpd = dev_to_genpd(dev);
1088 if (IS_ERR(genpd))
1089 return -EINVAL;
1090
1091 /*
1092 * Since all of the "noirq" callbacks are executed sequentially, it is
1093 * guaranteed that this function will never run twice in parallel for
1094 * the same PM domain, so it is not necessary to use locking here.
65533bbf
RW
1095 *
1096 * At this point suspended_count == 0 means we are being run for the
1097 * first time for the given domain in the present cycle.
596ba34b 1098 */
65533bbf 1099 if (genpd->suspended_count++ == 0) {
596ba34b 1100 /*
65533bbf
RW
1101 * The boot kernel might put the domain into arbitrary state,
1102 * so make it appear as powered off to pm_genpd_poweron(), so
1103 * that it tries to power it on in case it was really off.
596ba34b 1104 */
65533bbf
RW
1105 genpd->status = GPD_STATE_POWER_OFF;
1106 if (genpd->suspend_power_off) {
1107 /*
1108 * If the domain was off before the hibernation, make
1109 * sure it will be off going forward.
1110 */
1111 if (genpd->power_off)
1112 genpd->power_off(genpd);
1113
1114 return 0;
1115 }
596ba34b
RW
1116 }
1117
18dd2ece
RW
1118 if (genpd->suspend_power_off)
1119 return 0;
1120
596ba34b 1121 pm_genpd_poweron(genpd);
596ba34b 1122
1e78a0c7 1123 return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
596ba34b
RW
1124}
1125
1126/**
1127 * pm_genpd_complete - Complete power transition of a device in a power domain.
1128 * @dev: Device to complete the transition of.
1129 *
1130 * Complete a power transition of a device (during a system-wide power
1131 * transition) under the assumption that its pm_domain field points to the
1132 * domain member of an object of type struct generic_pm_domain representing
1133 * a power domain consisting of I/O devices.
1134 */
1135static void pm_genpd_complete(struct device *dev)
1136{
1137 struct generic_pm_domain *genpd;
1138 bool run_complete;
1139
1140 dev_dbg(dev, "%s()\n", __func__);
1141
1142 genpd = dev_to_genpd(dev);
1143 if (IS_ERR(genpd))
1144 return;
1145
1146 mutex_lock(&genpd->lock);
1147
1148 run_complete = !genpd->suspend_power_off;
1149 if (--genpd->prepared_count == 0)
1150 genpd->suspend_power_off = false;
1151
1152 mutex_unlock(&genpd->lock);
1153
1154 if (run_complete) {
1155 pm_generic_complete(dev);
6f00ff78 1156 pm_runtime_set_active(dev);
596ba34b 1157 pm_runtime_enable(dev);
6f00ff78 1158 pm_runtime_idle(dev);
596ba34b
RW
1159 }
1160}
1161
1162#else
1163
1164#define pm_genpd_prepare NULL
1165#define pm_genpd_suspend NULL
0496c8ae 1166#define pm_genpd_suspend_late NULL
596ba34b 1167#define pm_genpd_suspend_noirq NULL
0496c8ae 1168#define pm_genpd_resume_early NULL
596ba34b
RW
1169#define pm_genpd_resume_noirq NULL
1170#define pm_genpd_resume NULL
1171#define pm_genpd_freeze NULL
0496c8ae 1172#define pm_genpd_freeze_late NULL
596ba34b 1173#define pm_genpd_freeze_noirq NULL
0496c8ae 1174#define pm_genpd_thaw_early NULL
596ba34b
RW
1175#define pm_genpd_thaw_noirq NULL
1176#define pm_genpd_thaw NULL
596ba34b 1177#define pm_genpd_restore_noirq NULL
596ba34b
RW
1178#define pm_genpd_complete NULL
1179
1180#endif /* CONFIG_PM_SLEEP */
1181
f721889f 1182/**
b02c999a 1183 * __pm_genpd_add_device - Add a device to an I/O PM domain.
f721889f
RW
1184 * @genpd: PM domain to add the device to.
1185 * @dev: Device to be added.
b02c999a 1186 * @td: Set of PM QoS timing parameters to attach to the device.
f721889f 1187 */
b02c999a
RW
1188int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1189 struct gpd_timing_data *td)
f721889f 1190{
cd0ea672 1191 struct generic_pm_domain_data *gpd_data;
4605ab65 1192 struct pm_domain_data *pdd;
f721889f
RW
1193 int ret = 0;
1194
1195 dev_dbg(dev, "%s()\n", __func__);
1196
1197 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1198 return -EINVAL;
1199
17b75eca 1200 genpd_acquire_lock(genpd);
f721889f 1201
17b75eca 1202 if (genpd->status == GPD_STATE_POWER_OFF) {
f721889f
RW
1203 ret = -EINVAL;
1204 goto out;
1205 }
1206
596ba34b
RW
1207 if (genpd->prepared_count > 0) {
1208 ret = -EAGAIN;
1209 goto out;
1210 }
1211
4605ab65
RW
1212 list_for_each_entry(pdd, &genpd->dev_list, list_node)
1213 if (pdd->dev == dev) {
f721889f
RW
1214 ret = -EINVAL;
1215 goto out;
1216 }
1217
cd0ea672
RW
1218 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1219 if (!gpd_data) {
1220 ret = -ENOMEM;
1221 goto out;
1222 }
1223
596ba34b 1224 genpd->device_count++;
f721889f 1225
f721889f 1226 dev->pm_domain = &genpd->domain;
4605ab65 1227 dev_pm_get_subsys_data(dev);
cd0ea672
RW
1228 dev->power.subsys_data->domain_data = &gpd_data->base;
1229 gpd_data->base.dev = dev;
1230 gpd_data->need_restore = false;
1231 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
b02c999a
RW
1232 if (td)
1233 gpd_data->td = *td;
f721889f
RW
1234
1235 out:
17b75eca 1236 genpd_release_lock(genpd);
f721889f
RW
1237
1238 return ret;
1239}
1240
c8aa130b
TA
1241/**
1242 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1243 * @genpd_node: Device tree node pointer representing a PM domain to which the
1244 * the device is added to.
1245 * @dev: Device to be added.
1246 * @td: Set of PM QoS timing parameters to attach to the device.
1247 */
1248int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1249 struct gpd_timing_data *td)
1250{
1251 struct generic_pm_domain *genpd = NULL, *gpd;
1252
1253 dev_dbg(dev, "%s()\n", __func__);
1254
1255 if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1256 return -EINVAL;
1257
1258 mutex_lock(&gpd_list_lock);
1259 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1260 if (gpd->of_node == genpd_node) {
1261 genpd = gpd;
1262 break;
1263 }
1264 }
1265 mutex_unlock(&gpd_list_lock);
1266
1267 if (!genpd)
1268 return -EINVAL;
1269
1270 return __pm_genpd_add_device(genpd, dev, td);
1271}
1272
f721889f
RW
1273/**
1274 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1275 * @genpd: PM domain to remove the device from.
1276 * @dev: Device to be removed.
1277 */
1278int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1279 struct device *dev)
1280{
4605ab65 1281 struct pm_domain_data *pdd;
efa69025 1282 int ret = 0;
f721889f
RW
1283
1284 dev_dbg(dev, "%s()\n", __func__);
1285
efa69025
RW
1286 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
1287 || IS_ERR_OR_NULL(dev->pm_domain)
1288 || pd_to_genpd(dev->pm_domain) != genpd)
f721889f
RW
1289 return -EINVAL;
1290
17b75eca 1291 genpd_acquire_lock(genpd);
f721889f 1292
596ba34b
RW
1293 if (genpd->prepared_count > 0) {
1294 ret = -EAGAIN;
1295 goto out;
1296 }
1297
efa69025
RW
1298 dev->pm_domain = NULL;
1299 pdd = dev->power.subsys_data->domain_data;
1300 list_del_init(&pdd->list_node);
1301 dev->power.subsys_data->domain_data = NULL;
1302 dev_pm_put_subsys_data(dev);
1303 kfree(to_gpd_data(pdd));
f721889f 1304
efa69025 1305 genpd->device_count--;
f721889f 1306
596ba34b 1307 out:
17b75eca 1308 genpd_release_lock(genpd);
f721889f
RW
1309
1310 return ret;
1311}
1312
1e78a0c7
RW
1313/**
1314 * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
1315 * @dev: Device to set/unset the flag for.
1316 * @val: The new value of the device's "always on" flag.
1317 */
1318void pm_genpd_dev_always_on(struct device *dev, bool val)
1319{
1320 struct pm_subsys_data *psd;
1321 unsigned long flags;
1322
1323 spin_lock_irqsave(&dev->power.lock, flags);
1324
1325 psd = dev_to_psd(dev);
1326 if (psd && psd->domain_data)
1327 to_gpd_data(psd->domain_data)->always_on = val;
1328
1329 spin_unlock_irqrestore(&dev->power.lock, flags);
1330}
1331EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
1332
f721889f
RW
1333/**
1334 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1335 * @genpd: Master PM domain to add the subdomain to.
bc0403ff 1336 * @subdomain: Subdomain to be added.
f721889f
RW
1337 */
1338int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
bc0403ff 1339 struct generic_pm_domain *subdomain)
f721889f 1340{
5063ce15 1341 struct gpd_link *link;
f721889f
RW
1342 int ret = 0;
1343
bc0403ff 1344 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
f721889f
RW
1345 return -EINVAL;
1346
17b75eca
RW
1347 start:
1348 genpd_acquire_lock(genpd);
bc0403ff 1349 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
f721889f 1350
bc0403ff
RW
1351 if (subdomain->status != GPD_STATE_POWER_OFF
1352 && subdomain->status != GPD_STATE_ACTIVE) {
1353 mutex_unlock(&subdomain->lock);
17b75eca
RW
1354 genpd_release_lock(genpd);
1355 goto start;
1356 }
1357
1358 if (genpd->status == GPD_STATE_POWER_OFF
bc0403ff 1359 && subdomain->status != GPD_STATE_POWER_OFF) {
f721889f
RW
1360 ret = -EINVAL;
1361 goto out;
1362 }
1363
5063ce15 1364 list_for_each_entry(link, &genpd->slave_links, slave_node) {
bc0403ff 1365 if (link->slave == subdomain && link->master == genpd) {
f721889f
RW
1366 ret = -EINVAL;
1367 goto out;
1368 }
1369 }
1370
5063ce15
RW
1371 link = kzalloc(sizeof(*link), GFP_KERNEL);
1372 if (!link) {
1373 ret = -ENOMEM;
1374 goto out;
1375 }
1376 link->master = genpd;
1377 list_add_tail(&link->master_node, &genpd->master_links);
bc0403ff
RW
1378 link->slave = subdomain;
1379 list_add_tail(&link->slave_node, &subdomain->slave_links);
1380 if (subdomain->status != GPD_STATE_POWER_OFF)
c4bb3160 1381 genpd_sd_counter_inc(genpd);
f721889f 1382
f721889f 1383 out:
bc0403ff 1384 mutex_unlock(&subdomain->lock);
17b75eca 1385 genpd_release_lock(genpd);
f721889f
RW
1386
1387 return ret;
1388}
1389
1390/**
1391 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1392 * @genpd: Master PM domain to remove the subdomain from.
5063ce15 1393 * @subdomain: Subdomain to be removed.
f721889f
RW
1394 */
1395int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
5063ce15 1396 struct generic_pm_domain *subdomain)
f721889f 1397{
5063ce15 1398 struct gpd_link *link;
f721889f
RW
1399 int ret = -EINVAL;
1400
5063ce15 1401 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
f721889f
RW
1402 return -EINVAL;
1403
17b75eca
RW
1404 start:
1405 genpd_acquire_lock(genpd);
f721889f 1406
5063ce15
RW
1407 list_for_each_entry(link, &genpd->master_links, master_node) {
1408 if (link->slave != subdomain)
f721889f
RW
1409 continue;
1410
1411 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1412
17b75eca
RW
1413 if (subdomain->status != GPD_STATE_POWER_OFF
1414 && subdomain->status != GPD_STATE_ACTIVE) {
1415 mutex_unlock(&subdomain->lock);
1416 genpd_release_lock(genpd);
1417 goto start;
1418 }
1419
5063ce15
RW
1420 list_del(&link->master_node);
1421 list_del(&link->slave_node);
1422 kfree(link);
17b75eca 1423 if (subdomain->status != GPD_STATE_POWER_OFF)
f721889f
RW
1424 genpd_sd_counter_dec(genpd);
1425
1426 mutex_unlock(&subdomain->lock);
1427
1428 ret = 0;
1429 break;
1430 }
1431
17b75eca 1432 genpd_release_lock(genpd);
f721889f
RW
1433
1434 return ret;
1435}
1436
d5e4cbfe
RW
1437/**
1438 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1439 * @dev: Device to add the callbacks to.
1440 * @ops: Set of callbacks to add.
b02c999a 1441 * @td: Timing data to add to the device along with the callbacks (optional).
d5e4cbfe 1442 */
b02c999a
RW
1443int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1444 struct gpd_timing_data *td)
d5e4cbfe
RW
1445{
1446 struct pm_domain_data *pdd;
1447 int ret = 0;
1448
1449 if (!(dev && dev->power.subsys_data && ops))
1450 return -EINVAL;
1451
1452 pm_runtime_disable(dev);
1453 device_pm_lock();
1454
1455 pdd = dev->power.subsys_data->domain_data;
1456 if (pdd) {
1457 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1458
1459 gpd_data->ops = *ops;
b02c999a
RW
1460 if (td)
1461 gpd_data->td = *td;
d5e4cbfe
RW
1462 } else {
1463 ret = -EINVAL;
1464 }
1465
1466 device_pm_unlock();
1467 pm_runtime_enable(dev);
1468
1469 return ret;
1470}
1471EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1472
1473/**
b02c999a 1474 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
d5e4cbfe 1475 * @dev: Device to remove the callbacks from.
b02c999a 1476 * @clear_td: If set, clear the device's timing data too.
d5e4cbfe 1477 */
b02c999a 1478int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
d5e4cbfe
RW
1479{
1480 struct pm_domain_data *pdd;
1481 int ret = 0;
1482
1483 if (!(dev && dev->power.subsys_data))
1484 return -EINVAL;
1485
1486 pm_runtime_disable(dev);
1487 device_pm_lock();
1488
1489 pdd = dev->power.subsys_data->domain_data;
1490 if (pdd) {
1491 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1492
1493 gpd_data->ops = (struct gpd_dev_ops){ 0 };
b02c999a
RW
1494 if (clear_td)
1495 gpd_data->td = (struct gpd_timing_data){ 0 };
d5e4cbfe
RW
1496 } else {
1497 ret = -EINVAL;
1498 }
1499
1500 device_pm_unlock();
1501 pm_runtime_enable(dev);
1502
1503 return ret;
1504}
b02c999a 1505EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
d5e4cbfe 1506
d23b9b00
RW
1507/* Default device callbacks for generic PM domains. */
1508
ecf00475
RW
1509/**
1510 * pm_genpd_default_save_state - Default "save device state" for PM domians.
1511 * @dev: Device to handle.
1512 */
1513static int pm_genpd_default_save_state(struct device *dev)
1514{
1515 int (*cb)(struct device *__dev);
1516 struct device_driver *drv = dev->driver;
1517
1518 cb = dev_gpd_data(dev)->ops.save_state;
1519 if (cb)
1520 return cb(dev);
1521
1522 if (drv && drv->pm && drv->pm->runtime_suspend)
1523 return drv->pm->runtime_suspend(dev);
1524
1525 return 0;
1526}
1527
1528/**
1529 * pm_genpd_default_restore_state - Default PM domians "restore device state".
1530 * @dev: Device to handle.
1531 */
1532static int pm_genpd_default_restore_state(struct device *dev)
1533{
1534 int (*cb)(struct device *__dev);
1535 struct device_driver *drv = dev->driver;
1536
1537 cb = dev_gpd_data(dev)->ops.restore_state;
1538 if (cb)
1539 return cb(dev);
1540
1541 if (drv && drv->pm && drv->pm->runtime_resume)
1542 return drv->pm->runtime_resume(dev);
1543
1544 return 0;
1545}
1546
0f1d6986
RW
1547#ifdef CONFIG_PM_SLEEP
1548
d23b9b00
RW
1549/**
1550 * pm_genpd_default_suspend - Default "device suspend" for PM domians.
1551 * @dev: Device to handle.
1552 */
1553static int pm_genpd_default_suspend(struct device *dev)
1554{
c9914854 1555 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
d23b9b00
RW
1556
1557 return cb ? cb(dev) : pm_generic_suspend(dev);
1558}
1559
1560/**
1561 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
1562 * @dev: Device to handle.
1563 */
1564static int pm_genpd_default_suspend_late(struct device *dev)
1565{
c9914854 1566 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
d23b9b00 1567
0496c8ae 1568 return cb ? cb(dev) : pm_generic_suspend_late(dev);
d23b9b00
RW
1569}
1570
1571/**
1572 * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
1573 * @dev: Device to handle.
1574 */
1575static int pm_genpd_default_resume_early(struct device *dev)
1576{
c9914854 1577 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
d23b9b00 1578
0496c8ae 1579 return cb ? cb(dev) : pm_generic_resume_early(dev);
d23b9b00
RW
1580}
1581
1582/**
1583 * pm_genpd_default_resume - Default "device resume" for PM domians.
1584 * @dev: Device to handle.
1585 */
1586static int pm_genpd_default_resume(struct device *dev)
1587{
c9914854 1588 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
d23b9b00
RW
1589
1590 return cb ? cb(dev) : pm_generic_resume(dev);
1591}
1592
1593/**
1594 * pm_genpd_default_freeze - Default "device freeze" for PM domians.
1595 * @dev: Device to handle.
1596 */
1597static int pm_genpd_default_freeze(struct device *dev)
1598{
1599 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1600
1601 return cb ? cb(dev) : pm_generic_freeze(dev);
1602}
1603
1604/**
1605 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
1606 * @dev: Device to handle.
1607 */
1608static int pm_genpd_default_freeze_late(struct device *dev)
1609{
1610 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1611
0496c8ae 1612 return cb ? cb(dev) : pm_generic_freeze_late(dev);
d23b9b00
RW
1613}
1614
1615/**
1616 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
1617 * @dev: Device to handle.
1618 */
1619static int pm_genpd_default_thaw_early(struct device *dev)
1620{
1621 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1622
0496c8ae 1623 return cb ? cb(dev) : pm_generic_thaw_early(dev);
d23b9b00
RW
1624}
1625
1626/**
1627 * pm_genpd_default_thaw - Default "device thaw" for PM domians.
1628 * @dev: Device to handle.
1629 */
1630static int pm_genpd_default_thaw(struct device *dev)
1631{
1632 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1633
1634 return cb ? cb(dev) : pm_generic_thaw(dev);
1635}
1636
0f1d6986
RW
1637#else /* !CONFIG_PM_SLEEP */
1638
1639#define pm_genpd_default_suspend NULL
1640#define pm_genpd_default_suspend_late NULL
1641#define pm_genpd_default_resume_early NULL
1642#define pm_genpd_default_resume NULL
1643#define pm_genpd_default_freeze NULL
1644#define pm_genpd_default_freeze_late NULL
1645#define pm_genpd_default_thaw_early NULL
1646#define pm_genpd_default_thaw NULL
1647
1648#endif /* !CONFIG_PM_SLEEP */
1649
f721889f
RW
1650/**
1651 * pm_genpd_init - Initialize a generic I/O PM domain object.
1652 * @genpd: PM domain object to initialize.
1653 * @gov: PM domain governor to associate with the domain (may be NULL).
1654 * @is_off: Initial value of the domain's power_is_off field.
1655 */
1656void pm_genpd_init(struct generic_pm_domain *genpd,
1657 struct dev_power_governor *gov, bool is_off)
1658{
1659 if (IS_ERR_OR_NULL(genpd))
1660 return;
1661
5063ce15
RW
1662 INIT_LIST_HEAD(&genpd->master_links);
1663 INIT_LIST_HEAD(&genpd->slave_links);
f721889f 1664 INIT_LIST_HEAD(&genpd->dev_list);
f721889f
RW
1665 mutex_init(&genpd->lock);
1666 genpd->gov = gov;
1667 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1668 genpd->in_progress = 0;
c4bb3160 1669 atomic_set(&genpd->sd_count, 0);
17b75eca
RW
1670 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1671 init_waitqueue_head(&genpd->status_wait_queue);
c6d22b37
RW
1672 genpd->poweroff_task = NULL;
1673 genpd->resume_count = 0;
596ba34b 1674 genpd->device_count = 0;
221e9b58 1675 genpd->max_off_time_ns = -1;
f721889f
RW
1676 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1677 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1678 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
596ba34b
RW
1679 genpd->domain.ops.prepare = pm_genpd_prepare;
1680 genpd->domain.ops.suspend = pm_genpd_suspend;
0496c8ae 1681 genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
596ba34b
RW
1682 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1683 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
0496c8ae 1684 genpd->domain.ops.resume_early = pm_genpd_resume_early;
596ba34b
RW
1685 genpd->domain.ops.resume = pm_genpd_resume;
1686 genpd->domain.ops.freeze = pm_genpd_freeze;
0496c8ae 1687 genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
596ba34b
RW
1688 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1689 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
0496c8ae 1690 genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
596ba34b 1691 genpd->domain.ops.thaw = pm_genpd_thaw;
d23b9b00 1692 genpd->domain.ops.poweroff = pm_genpd_suspend;
0496c8ae 1693 genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
d23b9b00 1694 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
596ba34b 1695 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
0496c8ae 1696 genpd->domain.ops.restore_early = pm_genpd_resume_early;
d23b9b00 1697 genpd->domain.ops.restore = pm_genpd_resume;
596ba34b 1698 genpd->domain.ops.complete = pm_genpd_complete;
ecf00475
RW
1699 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1700 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
c9914854
RW
1701 genpd->dev_ops.suspend = pm_genpd_default_suspend;
1702 genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
1703 genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
1704 genpd->dev_ops.resume = pm_genpd_default_resume;
d23b9b00
RW
1705 genpd->dev_ops.freeze = pm_genpd_default_freeze;
1706 genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
1707 genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
1708 genpd->dev_ops.thaw = pm_genpd_default_thaw;
5125bbf3
RW
1709 mutex_lock(&gpd_list_lock);
1710 list_add(&genpd->gpd_list_node, &gpd_list);
1711 mutex_unlock(&gpd_list_lock);
1712}