PM / Domains: Make it possible to use domain names when adding devices
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpuidle.h>	/* for the cpuidle attach/detach helpers below */

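/*
 * Invoke a device-specific PM domain callback: prefer the callback set in
 * the domain's dev_ops and fall back to the one attached to the device
 * itself.  Evaluates to the callback's return value, or to (type)0 if
 * neither callback is set.
 */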
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})
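
/*
 * Like GENPD_DEV_CALLBACK(), but also measure the callback's execution time.
 * If the callback succeeds and runs longer than the worst case recorded so
 * far in the device's timing data, record the new worst case and flag both
 * the device and the domain so that latency constraints are re-evaluated.
 */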
#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpu_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpu_data->saved_exit_latency;
	genpd->cpu_data->idle_state->exit_latency = usecs64;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpu_data) {
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for master" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			genpd_recalc_cpu_exit_latency(genpd);
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	bool need_restore = gpd_data->need_restore;

	gpd_data->need_restore = false;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpu_data) {
		/*
		 * If cpu_data is set, cpuidle should turn the domain off when
		 * the CPU in it is idle.  In that case we don't decrement the
		 * subdomain counts of the master domains, so that power is not
		 * removed from the current domain prematurely as a result of
		 * cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	if (genpd->power_on)
		genpd->power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
	} else {
		pm_genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);

#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return NULL;

	mutex_init(&gpd_data->lock);
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
	return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
				     struct generic_pm_domain_data *gpd_data)
{
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = &genpd->domain;
	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	mutex_unlock(&gpd_data->lock);

 out:
	genpd_release_lock(genpd);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}
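
/*
 * Example (illustrative sketch, not part of the original file): a platform
 * would typically set up a domain and register a device with it along these
 * lines; my_domain, its power callbacks and pdev are hypothetical.
 *
 *	static struct generic_pm_domain my_domain = {
 *		.name = "MYPD",
 *		.power_on = my_domain_power_on,
 *		.power_off = my_domain_power_off,
 *	};
 *
 *	pm_genpd_init(&my_domain, NULL, true);
 *	__pm_genpd_add_device(&my_domain, &pdev->dev, NULL);
 */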

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing the PM domain to which
 * the device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
			     struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd->of_node == genpd_node) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	if (!genpd)
		return -EINVAL;

	return __pm_genpd_add_device(genpd, dev, td);
}

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return __pm_genpd_add_device(genpd, dev, td);
}
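
/*
 * Example (illustrative sketch, not part of the original file): with the
 * name-based lookup above, platform code can attach a device to a domain
 * registered elsewhere without holding a pointer to it; the domain name
 * "A4LC" and pdev are hypothetical.
 *
 *	__pm_genpd_name_add_device("A4LC", &pdev->dev, NULL);
 */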

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	bool remove = false;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    ||  IS_ERR_OR_NULL(dev->pm_domain)
	    ||  pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	gpd_data = to_gpd_data(pdd);
	if (--gpd_data->refcount == 0) {
		dev->power.subsys_data->domain_data = NULL;
		remove = true;
	}

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
	struct pm_subsys_data *psd;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	psd = dev_to_psd(dev);
	if (psd && psd->domain_data)
		to_gpd_data(psd->domain_data)->need_restore = val;

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}
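
/*
 * Example (illustrative sketch, not part of the original file): making one
 * domain a subdomain of another, so that the master stays powered for as
 * long as the subdomain is in use; a4r and a3sp are hypothetical domains.
 *
 *	pm_genpd_add_subdomain(&a4r, &a3sp);
 */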

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
 * @dev: Device to add the callbacks to.
 * @ops: Set of callbacks to add.
 * @td: Timing data to add to the device along with the callbacks (optional).
 *
 * Every call to this routine should be balanced with a call to
 * __pm_genpd_remove_callbacks() and they must not be nested.
 */
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
			   struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	int ret = 0;

	if (!(dev && ops))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	pm_runtime_disable(dev);
	device_pm_lock();

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	gpd_data->ops = *ops;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

 out:
	device_pm_unlock();
	pm_runtime_enable(dev);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
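
/*
 * Example (illustrative sketch, not part of the original file): attaching
 * device-specific save/restore callbacks, which GENPD_DEV_CALLBACK() will
 * use as the fallback for this device; my_save() and my_restore() are
 * hypothetical driver routines.
 *
 *	static struct gpd_dev_ops my_ops = {
 *		.save_state = my_save,
 *		.restore_state = my_restore,
 *	};
 *
 *	pm_genpd_add_callbacks(dev, &my_ops, NULL);
 */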

/**
 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
 * @dev: Device to remove the callbacks from.
 * @clear_td: If set, clear the device's timing data too.
 *
 * This routine can only be called after pm_genpd_add_callbacks().
 */
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
	struct generic_pm_domain_data *gpd_data = NULL;
	bool remove = false;
	int ret = 0;

	if (!(dev && dev->power.subsys_data))
		return -EINVAL;

	pm_runtime_disable(dev);
	device_pm_lock();

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
		gpd_data->ops = (struct gpd_dev_ops){ NULL };
		if (clear_td)
			gpd_data->td = (struct gpd_timing_data){ 0 };

		if (--gpd_data->refcount == 0) {
			dev->power.subsys_data->domain_data = NULL;
			remove = true;
		}
	} else {
		ret = -EINVAL;
	}

	spin_unlock_irq(&dev->power.lock);

	device_pm_unlock();
	pm_runtime_enable(dev);

	if (ret)
		return ret;

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);

/**
 * genpd_attach_cpuidle - Tie a PM domain to a cpuidle state.
 * @genpd: PM domain to connect with cpuidle.
 * @state: Index of the cpuidle state to tie the domain to.
 *
 * The target state must currently be disabled.  Its exit latency is saved
 * so that genpd_detach_cpuidle() can restore it later, and the domain's
 * power-off overhead is reflected in the state's exit latency from now on
 * (via genpd_recalc_cpu_exit_latency()).
 */
int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpu_data) {
		ret = -EEXIST;
		goto out;
	}
	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
	if (!cpu_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_free;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpu_data->idle_state = idle_state;
	cpu_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpu_data = cpu_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();
 err_free:
	/* Don't leak cpu_data if the state could not be attached. */
	kfree(cpu_data);
	goto out;
}

/**
 * genpd_detach_cpuidle - Undo the effect of genpd_attach_cpuidle().
 * @genpd: PM domain to disconnect from cpuidle.
 *
 * The cpuidle state tied to @genpd must still be disabled.  Its original
 * exit latency is restored and the cpuidle driver reference is dropped.
 */
int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpu_data = genpd->cpu_data;
	if (!cpu_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpu_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpu_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpu_data = NULL;
	kfree(cpu_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}
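
/*
 * Usage sketch (illustrative only): a platform's cpuidle setup code might
 * tie its power domain to C-state 1 while that state is disabled, and
 * detach it again on teardown.  "foo_pd" is a hypothetical domain:
 *
 *	ret = genpd_attach_cpuidle(&foo_pd, 1);
 *	...
 *	ret = genpd_detach_cpuidle(&foo_pd);
 */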

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	cb = dev_gpd_data(dev)->ops.save_state;
	if (cb)
		return cb(dev);

	/* Fall back to the subsystem's runtime suspend callback. */
	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	/* Failing that, try the driver's own runtime suspend callback. */
	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	cb = dev_gpd_data(dev)->ops.restore_state;
	if (cb)
		return cb(dev);

	/* Fall back to the subsystem's runtime resume callback. */
	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	/* Failing that, try the driver's own runtime resume callback. */
	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}
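
/*
 * Sketch (illustrative only) of overriding the defaults above: a device
 * whose state cannot be recovered by its subsystem's runtime PM callbacks
 * can supply its own save/restore pair.  foo_save_regs()/foo_restore_regs()
 * are hypothetical helpers:
 *
 *	static struct gpd_dev_ops foo_state_ops = {
 *		.save_state = foo_save_regs,
 *		.restore_state = foo_restore_regs,
 *	};
 *
 *	ret = pm_genpd_add_callbacks(dev, &foo_state_ops, NULL);
 */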

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;

	return cb ? cb(dev) : pm_generic_suspend(dev);
}

/**
 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;

	return cb ? cb(dev) : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;

	return cb ? cb(dev) : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_default_resume - Default "device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;

	return cb ? cb(dev) : pm_generic_resume(dev);
}

/**
 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;

	return cb ? cb(dev) : pm_generic_freeze(dev);
}

/**
 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;

	return cb ? cb(dev) : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;

	return cb ? cb(dev) : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;

	return cb ? cb(dev) : pm_generic_thaw(dev);
}

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_default_suspend	NULL
#define pm_genpd_default_suspend_late	NULL
#define pm_genpd_default_resume_early	NULL
#define pm_genpd_default_resume		NULL
#define pm_genpd_default_freeze		NULL
#define pm_genpd_default_freeze_late	NULL
#define pm_genpd_default_thaw_early	NULL
#define pm_genpd_default_thaw		NULL

#endif /* !CONFIG_PM_SLEEP */

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain (true if it starts powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	genpd->dev_ops.suspend = pm_genpd_default_suspend;
	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
	genpd->dev_ops.resume = pm_genpd_default_resume;
	genpd->dev_ops.freeze = pm_genpd_default_freeze;
	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
	genpd->dev_ops.thaw = pm_genpd_default_thaw;
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
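
/*
 * Initialization sketch (illustrative only): platform setup code might
 * declare a domain with its power on/off handlers and register it as
 * initially powered off.  foo_pd_power_on()/foo_pd_power_off() and the
 * "FOO" name are hypothetical:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "FOO",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 */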