PM: domains: Don't check PM_QOS_FLAG_NO_POWER_OFF in genpd
[linux-2.6-block.git] / drivers / base / power / domain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS      250     /* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        }                                                       \
        __ret;                                                  \
})
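
/*
 * Editorial illustration (not part of the original source): with
 * genpd->dev_ops.stop as the callback, a call such as
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands to roughly:
 *
 *      int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *      int __ret = 0;
 *
 *      if (__routine)
 *              __ret = __routine(dev);
 *      __ret;  // value of the statement expression
 *
 * i.e. the per-domain callback is invoked only if the domain provides it,
 * and 0 is returned otherwise.
 */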

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
        void (*lock)(struct generic_pm_domain *genpd);
        void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
        int (*lock_interruptible)(struct generic_pm_domain *genpd);
        void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
        mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
                                  int depth)
{
        mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
        return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
        return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
        .lock = genpd_lock_mtx,
        .lock_nested = genpd_lock_nested_mtx,
        .lock_interruptible = genpd_lock_interruptible_mtx,
        .unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
        __acquires(&genpd->slock)
{
        unsigned long flags;

        spin_lock_irqsave(&genpd->slock, flags);
        genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
                                   int depth)
        __acquires(&genpd->slock)
{
        unsigned long flags;

        spin_lock_irqsave_nested(&genpd->slock, flags, depth);
        genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
        __acquires(&genpd->slock)
{
        unsigned long flags;

        spin_lock_irqsave(&genpd->slock, flags);
        genpd->lock_flags = flags;
        return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
        __releases(&genpd->slock)
{
        spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
        .lock = genpd_lock_spin,
        .lock_nested = genpd_lock_nested_spin,
        .lock_interruptible = genpd_lock_interruptible_spin,
        .unlock = genpd_unlock_spin,
};
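
/*
 * Editorial note: a genpd uses exactly one of the two lock implementations
 * above, selected when the domain is initialized. Domains flagged
 * GENPD_FLAG_IRQ_SAFE use the spinlock variant so they can be powered on/off
 * from atomic context (e.g. from runtime PM callbacks of irq-safe devices);
 * all other domains use the sleepable mutex variant.
 */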

#define genpd_lock(p)                   p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)         p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)     p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)                 p->lock_ops->unlock(p)

#define genpd_status_on(genpd)          (genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)        (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)       (genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)   (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)      (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)   (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
                const struct generic_pm_domain *genpd)
{
        bool ret;

        ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

        /*
         * Warn once if an IRQ safe device is attached to a no sleep domain,
         * to indicate a suboptimal configuration for PM. For an always on
         * domain this isn't the case, thus don't warn.
         */
        if (ret && !genpd_is_always_on(genpd))
                dev_warn_once(dev, "PM domain %s will not be powered off\n",
                              genpd->name);

        return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
                return NULL;

        /* A genpd always has its ->runtime_suspend() callback assigned. */
        if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
                return pd_to_genpd(dev->pm_domain);

        return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
                          struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
                           struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        bool ret = false;

        if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
                ret = !!atomic_dec_and_test(&genpd->sd_count);

        return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
        atomic_inc(&genpd->sd_count);
        smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
        struct dentry *d;

        d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
        debugfs_remove(d);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
        u64 delta, now;

        now = ktime_get_mono_fast_ns();
        if (now <= genpd->accounting_time)
                return;

        delta = now - genpd->accounting_time;

        /*
         * If genpd->status is active, it means we are just
         * out of off and so update the idle time and vice
         * versa.
         */
        if (genpd->status == GENPD_STATE_ON)
                genpd->states[genpd->state_idx].idle_time += delta;
        else
                genpd->on_time += delta;

        genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
                                           unsigned int state)
{
        struct generic_pm_domain_data *pd_data;
        struct pm_domain_data *pdd;
        struct gpd_link *link;

        /* New requested state is same as Max requested state */
        if (state == genpd->performance_state)
                return state;

        /* New requested state is higher than Max requested state */
        if (state > genpd->performance_state)
                return state;

        /* Traverse all devices within the domain */
        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                pd_data = to_gpd_data(pdd);

                if (pd_data->performance_state > state)
                        state = pd_data->performance_state;
        }

        /*
         * Traverse all sub-domains within the domain. This can be
         * done without any additional locking as the link->performance_state
         * field is protected by the parent genpd->lock, which is already taken.
         *
         * Also note that link->performance_state (subdomain's performance state
         * requirement to parent domain) is different from
         * link->child->performance_state (current performance state requirement
         * of the devices/sub-domains of the subdomain) and so can have a
         * different value.
         *
         * Note that we also take vote from powered-off sub-domains into account
         * as the same is done for devices right now.
         */
        list_for_each_entry(link, &genpd->parent_links, parent_node) {
                if (link->performance_state > state)
                        state = link->performance_state;
        }

        return state;
}
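
/*
 * Editorial worked example (not in the original source): suppose the
 * aggregated state is 5 because one attached device had requested 5. When
 * that device lowers its request to 2, genpd_set_performance_state() below
 * records the new per-device vote and calls the function above with
 * state == 2. It then walks the remaining votes -- say another device at 1
 * and a subdomain link at 4 -- and returns max(2, 1, 4) = 4: the highest
 * outstanding vote among all devices and subdomain links.
 */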

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
                                         struct generic_pm_domain *parent,
                                         unsigned int pstate)
{
        if (!parent->set_performance_state)
                return pstate;

        return dev_pm_opp_xlate_performance_state(genpd->opp_table,
                                                  parent->opp_table,
                                                  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
                                        unsigned int state, int depth)
{
        struct generic_pm_domain *parent;
        struct gpd_link *link;
        int parent_state, ret;

        if (state == genpd->performance_state)
                return 0;

        /* Propagate to parents of genpd */
        list_for_each_entry(link, &genpd->child_links, child_node) {
                parent = link->parent;

                /* Find parent's performance state */
                ret = genpd_xlate_performance_state(genpd, parent, state);
                if (unlikely(ret < 0))
                        goto err;

                parent_state = ret;

                genpd_lock_nested(parent, depth + 1);

                link->prev_performance_state = link->performance_state;
                link->performance_state = parent_state;
                parent_state = _genpd_reeval_performance_state(parent,
                                                               parent_state);
                ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
                if (ret)
                        link->performance_state = link->prev_performance_state;

                genpd_unlock(parent);

                if (ret)
                        goto err;
        }

        if (genpd->set_performance_state) {
                ret = genpd->set_performance_state(genpd, state);
                if (ret)
                        goto err;
        }

        genpd->performance_state = state;
        return 0;

err:
        /* Encountered an error, let's roll back */
        list_for_each_entry_continue_reverse(link, &genpd->child_links,
                                             child_node) {
                parent = link->parent;

                genpd_lock_nested(parent, depth + 1);

                parent_state = link->prev_performance_state;
                link->performance_state = parent_state;

                parent_state = _genpd_reeval_performance_state(parent,
                                                               parent_state);
                if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
                        pr_err("%s: Failed to roll back to %d performance state\n",
                               parent->name, parent_state);
                }

                genpd_unlock(parent);
        }

        return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
        struct generic_pm_domain *genpd = dev_to_genpd(dev);
        struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
        unsigned int prev_state;
        int ret;

        prev_state = gpd_data->performance_state;
        if (prev_state == state)
                return 0;

        gpd_data->performance_state = state;
        state = _genpd_reeval_performance_state(genpd, state);

        ret = _genpd_set_performance_state(genpd, state, 0);
        if (ret)
                gpd_data->performance_state = prev_state;

        return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
        unsigned int prev_state = dev_gpd_data(dev)->performance_state;

        if (!genpd_set_performance_state(dev, 0))
                return prev_state;

        return 0;
}

static void genpd_restore_performance_state(struct device *dev,
                                            unsigned int state)
{
        if (state)
                genpd_set_performance_state(dev, state);
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *         device doesn't have any performance state constraints left (and so
 *         the device wouldn't participate anymore in determining the target
 *         performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
        struct generic_pm_domain *genpd;
        int ret = 0;

        genpd = dev_to_genpd_safe(dev);
        if (!genpd)
                return -ENODEV;

        if (WARN_ON(!dev->power.subsys_data ||
                    !dev->power.subsys_data->domain_data))
                return -EINVAL;

        genpd_lock(genpd);
        if (pm_runtime_suspended(dev)) {
                dev_gpd_data(dev)->rpm_pstate = state;
        } else {
                ret = genpd_set_performance_state(dev, state);
                if (!ret)
                        dev_gpd_data(dev)->rpm_pstate = 0;
        }
        genpd_unlock(genpd);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
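
/*
 * Editorial usage sketch (hypothetical consumer, not part of this file): a
 * driver whose device is attached to a genpd with performance states might
 * bracket a high-throughput phase like this:
 *
 *      ret = dev_pm_genpd_set_performance_state(dev, 3);
 *      if (ret)
 *              return ret;
 *      // ...do work that needs the higher operating point...
 *      dev_pm_genpd_set_performance_state(dev, 0);     // drop the vote
 *
 * In practice many consumers set states indirectly via required OPPs
 * (e.g. dev_pm_opp_set_rate()) rather than calling this directly.
 */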

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform the PM framework of the next wakeup. It's assumed
 * that the users guarantee that the genpd wouldn't be detached while this
 * routine is getting called. Additionally, it's also assumed that @dev isn't
 * runtime suspended (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
        struct generic_pm_domain_data *gpd_data;
        struct generic_pm_domain *genpd;

        genpd = dev_to_genpd_safe(dev);
        if (!genpd)
                return;

        gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        gpd_data->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
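
/*
 * Editorial usage sketch (hypothetical, not part of this file): a driver that
 * knows its next interrupt is due in about 2 ms can hint the governor so it
 * avoids entering a deep domain state whose enter/exit latency would not pay
 * off:
 *
 *      dev_pm_genpd_set_next_wakeup(dev, ktime_add_us(ktime_get(), 2000));
 *
 * Governors that understand next_wakeup can then pick a shallower idle state
 * for the domain.
 */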

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
        unsigned int state_idx = genpd->state_idx;
        ktime_t time_start;
        s64 elapsed_ns;
        int ret;

        /* Notify consumers that we are about to power on. */
        ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
                                             GENPD_NOTIFY_PRE_ON,
                                             GENPD_NOTIFY_OFF, NULL);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        if (!genpd->power_on)
                goto out;

        if (!timed) {
                ret = genpd->power_on(genpd);
                if (ret)
                        goto err;

                goto out;
        }

        time_start = ktime_get();
        ret = genpd->power_on(genpd);
        if (ret)
                goto err;

        elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
        if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
                goto out;

        genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
        genpd->max_off_time_changed = true;
        pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
                 genpd->name, "on", elapsed_ns);

out:
        raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
        return 0;
err:
        raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
                                NULL);
        return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
        unsigned int state_idx = genpd->state_idx;
        ktime_t time_start;
        s64 elapsed_ns;
        int ret;

        /* Notify consumers that we are about to power off. */
        ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
                                             GENPD_NOTIFY_PRE_OFF,
                                             GENPD_NOTIFY_ON, NULL);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        if (!genpd->power_off)
                goto out;

        if (!timed) {
                ret = genpd->power_off(genpd);
                if (ret)
                        goto busy;

                goto out;
        }

        time_start = ktime_get();
        ret = genpd->power_off(genpd);
        if (ret)
                goto busy;

        elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
        if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
                goto out;

        genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
        genpd->max_off_time_changed = true;
        pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
                 genpd->name, "off", elapsed_ns);

out:
        raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
                                NULL);
        return 0;
busy:
        raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
        return ret;
}
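
/*
 * Editorial note: the two helpers above implement the power notifier
 * protocol. Consumers registered via dev_pm_genpd_add_notifier() see
 * GENPD_NOTIFY_PRE_ON before and GENPD_NOTIFY_ON after a successful
 * power-up, and GENPD_NOTIFY_PRE_OFF/GENPD_NOTIFY_OFF around a power-down.
 * The *_robust() call re-notifies already-notified consumers with the old
 * state if any notifier vetoes the transition, and a failed
 * power_on()/power_off() callback likewise re-announces the previous state.
 */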

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
        queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
                           unsigned int depth)
{
        struct pm_domain_data *pdd;
        struct gpd_link *link;
        unsigned int not_suspended = 0;
        int ret;

        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) System suspend is in progress.
         */
        if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
                return 0;

        /*
         * Abort power off for the PM domain in the following situations:
         * (1) The domain is configured as always on.
         * (2) When the domain has a subdomain being powered on.
         */
        if (genpd_is_always_on(genpd) ||
            genpd_is_rpm_always_on(genpd) ||
            atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        /*
         * The children must be in their deepest (powered-off) states to allow
         * the parent to be powered off. Note that there's no need for
         * additional locking, as powering on a child requires the parent's
         * lock to be acquired first.
         */
        list_for_each_entry(link, &genpd->parent_links, parent_node) {
                struct generic_pm_domain *child = link->child;
                if (child->state_idx < child->state_count - 1)
                        return -EBUSY;
        }

        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                /*
                 * Do not allow the PM domain to be powered off, when an IRQ
                 * safe device is part of a non-IRQ safe domain.
                 */
                if (!pm_runtime_suspended(pdd->dev) ||
                    irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
                        not_suspended++;
        }

        if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
                return -EBUSY;

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        /* Default to shallowest state. */
        if (!genpd->gov)
                genpd->state_idx = 0;

        /* Don't power off, if a child domain is waiting to power on. */
        if (atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        ret = _genpd_power_off(genpd, true);
        if (ret) {
                genpd->states[genpd->state_idx].rejected++;
                return ret;
        }

        genpd->status = GENPD_STATE_OFF;
        genpd_update_accounting(genpd);
        genpd->states[genpd->state_idx].usage++;

        list_for_each_entry(link, &genpd->child_links, child_node) {
                genpd_sd_counter_dec(link->parent);
                genpd_lock_nested(link->parent, depth + 1);
                genpd_power_off(link->parent, false, depth + 1);
                genpd_unlock(link->parent);
        }

        return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
        struct gpd_link *link;
        int ret = 0;

        if (genpd_status_on(genpd))
                return 0;

        /*
         * The list is guaranteed not to change while the loop below is being
         * executed, unless one of the parents' .power_on() callbacks fiddles
         * with it.
         */
        list_for_each_entry(link, &genpd->child_links, child_node) {
                struct generic_pm_domain *parent = link->parent;

                genpd_sd_counter_inc(parent);

                genpd_lock_nested(parent, depth + 1);
                ret = genpd_power_on(parent, depth + 1);
                genpd_unlock(parent);

                if (ret) {
                        genpd_sd_counter_dec(parent);
                        goto err;
                }
        }

        ret = _genpd_power_on(genpd, true);
        if (ret)
                goto err;

        genpd->status = GENPD_STATE_ON;
        genpd_update_accounting(genpd);

        return 0;

 err:
        list_for_each_entry_continue_reverse(link,
                                             &genpd->child_links,
                                             child_node) {
                genpd_sd_counter_dec(link->parent);
                genpd_lock_nested(link->parent, depth + 1);
                genpd_power_off(link->parent, false, depth + 1);
                genpd_unlock(link->parent);
        }

        return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
        struct generic_pm_domain *genpd = dev_to_genpd(dev);

        return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
{
        struct generic_pm_domain_data *gpd_data;
        struct device *dev;

        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
        dev = gpd_data->base.dev;

        for (;;) {
                struct generic_pm_domain *genpd;
                struct pm_domain_data *pdd;

                spin_lock_irq(&dev->power.lock);

                pdd = dev->power.subsys_data ?
                                dev->power.subsys_data->domain_data : NULL;
                if (pdd) {
                        to_gpd_data(pdd)->td.constraint_changed = true;
                        genpd = dev_to_genpd(dev);
                } else {
                        genpd = ERR_PTR(-ENODATA);
                }

                spin_unlock_irq(&dev->power.lock);

                if (!IS_ERR(genpd)) {
                        genpd_lock(genpd);
                        genpd->max_off_time_changed = true;
                        genpd_unlock(genpd);
                }

                dev = dev->parent;
                if (!dev || dev->power.ignore_children)
                        break;
        }

        return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        genpd_lock(genpd);
        genpd_power_off(genpd, false, 0);
        genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
        int (*cb)(struct device *__dev);

        if (dev->type && dev->type->pm)
                cb = dev->type->pm->runtime_suspend;
        else if (dev->class && dev->class->pm)
                cb = dev->class->pm->runtime_suspend;
        else if (dev->bus && dev->bus->pm)
                cb = dev->bus->pm->runtime_suspend;
        else
                cb = NULL;

        if (!cb && dev->driver && dev->driver->pm)
                cb = dev->driver->pm->runtime_suspend;

        return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
        int (*cb)(struct device *__dev);

        if (dev->type && dev->type->pm)
                cb = dev->type->pm->runtime_resume;
        else if (dev->class && dev->class->pm)
                cb = dev->class->pm->runtime_resume;
        else if (dev->bus && dev->bus->pm)
                cb = dev->bus->pm->runtime_resume;
        else
                cb = NULL;

        if (!cb && dev->driver && dev->driver->pm)
                cb = dev->driver->pm->runtime_resume;

        return cb ? cb(dev) : 0;
}
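
/*
 * Editorial note: the two helpers above mirror the callback selection order
 * used by the runtime PM core (see drivers/base/power/runtime.c): device
 * type, then class, then bus, with the driver's own callback used only as a
 * fallback when none of those provide one.
 */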

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool (*suspend_ok)(struct device *__dev);
        struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
        struct gpd_timing_data *td = &gpd_data->td;
        bool runtime_pm = pm_runtime_enabled(dev);
        ktime_t time_start;
        s64 elapsed_ns;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * A runtime PM centric subsystem/driver may re-use the runtime PM
         * callbacks for other purposes than runtime PM. In those scenarios
         * runtime PM is disabled. Under these circumstances, we shall skip
         * validating/measuring the PM QoS latency.
         */
        suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
        if (runtime_pm && suspend_ok && !suspend_ok(dev))
                return -EBUSY;

        /* Measure suspend latency. */
        time_start = 0;
        if (runtime_pm)
                time_start = ktime_get();

        ret = __genpd_runtime_suspend(dev);
        if (ret)
                return ret;

        ret = genpd_stop_dev(genpd, dev);
        if (ret) {
                __genpd_runtime_resume(dev);
                return ret;
        }

        /* Update suspend latency value if the measured time exceeds it. */
        if (runtime_pm) {
                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > td->suspend_latency_ns) {
                        td->suspend_latency_ns = elapsed_ns;
                        dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
                                elapsed_ns);
                        genpd->max_off_time_changed = true;
                        td->constraint_changed = true;
                }
        }

        /*
         * If power.irq_safe is set, this routine may be run with
         * IRQs disabled, so suspend only if the PM domain also is irq_safe.
         */
        if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
                return 0;

        genpd_lock(genpd);
        gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
        genpd_power_off(genpd, true, 0);
        genpd_unlock(genpd);

        return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
        struct gpd_timing_data *td = &gpd_data->td;
        bool runtime_pm = pm_runtime_enabled(dev);
        ktime_t time_start;
        s64 elapsed_ns;
        int ret;
        bool timed = true;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * As we don't power off a non-IRQ safe domain, which holds
         * an IRQ safe device, we don't need to restore power to it.
         */
        if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
                timed = false;
                goto out;
        }

        genpd_lock(genpd);
        ret = genpd_power_on(genpd, 0);
        if (!ret)
                genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
        genpd_unlock(genpd);

        if (ret)
                return ret;

 out:
        /* Measure resume latency. */
        time_start = 0;
        if (timed && runtime_pm)
                time_start = ktime_get();

        ret = genpd_start_dev(genpd, dev);
        if (ret)
                goto err_poweroff;

        ret = __genpd_runtime_resume(dev);
        if (ret)
                goto err_stop;

        /* Update resume latency value if the measured time exceeds it. */
        if (timed && runtime_pm) {
                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > td->resume_latency_ns) {
                        td->resume_latency_ns = elapsed_ns;
                        dev_dbg(dev, "resume latency exceeded, %lld ns\n",
                                elapsed_ns);
                        genpd->max_off_time_changed = true;
                        td->constraint_changed = true;
                }
        }

        return 0;

err_stop:
        genpd_stop_dev(genpd, dev);
err_poweroff:
        if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
                genpd_lock(genpd);
                gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
                genpd_power_off(genpd, true, 0);
                genpd_unlock(genpd);
        }

        return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
        pd_ignore_unused = true;
        return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
        struct generic_pm_domain *genpd;

        if (pd_ignore_unused) {
                pr_warn("genpd: Not disabling unused power domains\n");
                return 0;
        }

        mutex_lock(&gpd_list_lock);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
                genpd_queue_power_off_work(genpd);

        mutex_unlock(&gpd_list_lock);

        return 0;
}
late_initcall(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
                                 unsigned int depth)
{
        struct gpd_link *link;

        if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
                return;

        if (genpd->suspended_count != genpd->device_count
            || atomic_read(&genpd->sd_count) > 0)
                return;

        /* Check that the children are in their deepest (powered-off) state. */
        list_for_each_entry(link, &genpd->parent_links, parent_node) {
                struct generic_pm_domain *child = link->child;
                if (child->state_idx < child->state_count - 1)
                        return;
        }

        /* Choose the deepest state when suspending */
        genpd->state_idx = genpd->state_count - 1;
        if (_genpd_power_off(genpd, false))
                return;

        genpd->status = GENPD_STATE_OFF;

        list_for_each_entry(link, &genpd->child_links, child_node) {
                genpd_sd_counter_dec(link->parent);

                if (use_lock)
                        genpd_lock_nested(link->parent, depth + 1);

                genpd_sync_power_off(link->parent, use_lock, depth + 1);

                if (use_lock)
                        genpd_unlock(link->parent);
        }
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
                                unsigned int depth)
{
        struct gpd_link *link;

        if (genpd_status_on(genpd))
                return;

        list_for_each_entry(link, &genpd->child_links, child_node) {
                genpd_sd_counter_inc(link->parent);

                if (use_lock)
                        genpd_lock_nested(link->parent, depth + 1);

                genpd_sync_power_on(link->parent, use_lock, depth + 1);

                if (use_lock)
                        genpd_unlock(link->parent);
        }

        _genpd_power_on(genpd, false);
        genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        genpd_lock(genpd);

        if (genpd->prepared_count++ == 0)
                genpd->suspended_count = 0;

        genpd_unlock(genpd);

        ret = pm_generic_prepare(dev);
        if (ret < 0) {
                genpd_lock(genpd);

                genpd->prepared_count--;

                genpd_unlock(genpd);
        }

        /* Never return 1, as genpd doesn't cope with the direct_complete path. */
        return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 * I/O PM domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
        struct generic_pm_domain *genpd;
        int ret = 0;

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (poweroff)
                ret = pm_generic_poweroff_noirq(dev);
        else
                ret = pm_generic_suspend_noirq(dev);
        if (ret)
                return ret;

        if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
                return 0;

        if (genpd->dev_ops.stop && genpd->dev_ops.start &&
            !pm_runtime_status_suspended(dev)) {
                ret = genpd_stop_dev(genpd, dev);
                if (ret) {
                        if (poweroff)
                                pm_generic_restore_noirq(dev);
                        else
                                pm_generic_resume_noirq(dev);
                        return ret;
                }
        }

        genpd_lock(genpd);
        genpd->suspended_count++;
        genpd_sync_power_off(genpd, true, 0);
        genpd_unlock(genpd);

        return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
        dev_dbg(dev, "%s()\n", __func__);

        return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
                return pm_generic_resume_noirq(dev);

        genpd_lock(genpd);
        genpd_sync_power_on(genpd, true, 0);
        genpd->suspended_count--;
        genpd_unlock(genpd);

        if (genpd->dev_ops.stop && genpd->dev_ops.start &&
            !pm_runtime_status_suspended(dev)) {
                ret = genpd_start_dev(genpd, dev);
                if (ret)
                        return ret;
        }

        return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
        const struct generic_pm_domain *genpd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        ret = pm_generic_freeze_noirq(dev);
        if (ret)
                return ret;

        if (genpd->dev_ops.stop && genpd->dev_ops.start &&
            !pm_runtime_status_suspended(dev))
                ret = genpd_stop_dev(genpd, dev);

        return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
        const struct generic_pm_domain *genpd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->dev_ops.stop && genpd->dev_ops.start &&
            !pm_runtime_status_suspended(dev)) {
                ret = genpd_start_dev(genpd, dev);
                if (ret)
                        return ret;
        }

        return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 * I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
        dev_dbg(dev, "%s()\n", __func__);

        return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
        genpd_lock(genpd);
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into an arbitrary state,
                 * so make it appear as powered off to genpd_sync_power_on(),
                 * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GENPD_STATE_OFF;
        }

        genpd_sync_power_on(genpd, true, 0);
        genpd_unlock(genpd);

        if (genpd->dev_ops.stop && genpd->dev_ops.start &&
            !pm_runtime_status_suspended(dev)) {
                ret = genpd_start_dev(genpd, dev);
                if (ret)
                        return ret;
        }

        return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        pm_generic_complete(dev);

        genpd_lock(genpd);

        genpd->prepared_count--;
        if (!genpd->prepared_count)
                genpd_queue_power_off_work(genpd);

        genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
        struct generic_pm_domain *genpd;
        bool use_lock;

        genpd = dev_to_genpd_safe(dev);
        if (!genpd)
                return;

        use_lock = genpd_is_irq_safe(genpd);

        if (use_lock)
                genpd_lock(genpd);

        if (suspend) {
                genpd->suspended_count++;
                genpd_sync_power_off(genpd, use_lock, 0);
        } else {
                genpd_sync_power_on(genpd, use_lock, 0);
                genpd->suspended_count--;
        }

        if (use_lock)
                genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
        genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
        genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
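
/*
 * Editorial note: dev_pm_genpd_suspend() and dev_pm_genpd_resume() are meant
 * to be called in matched pairs, since they adjust genpd->suspended_count,
 * which genpd_sync_power_off() compares against the domain's device_count
 * before it will power the domain off.
 */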

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare           NULL
#define genpd_suspend_noirq     NULL
#define genpd_resume_noirq      NULL
#define genpd_freeze_noirq      NULL
#define genpd_thaw_noirq        NULL
#define genpd_poweroff_noirq    NULL
#define genpd_restore_noirq     NULL
#define genpd_complete          NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;
        int ret;

        ret = dev_pm_get_subsys_data(dev);
        if (ret)
                return ERR_PTR(ret);

        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
        if (!gpd_data) {
                ret = -ENOMEM;
                goto err_put;
        }

        gpd_data->base.dev = dev;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
        gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
        gpd_data->next_wakeup = KTIME_MAX;

        spin_lock_irq(&dev->power.lock);

        if (dev->power.subsys_data->domain_data) {
                ret = -EINVAL;
                goto err_free;
        }

        dev->power.subsys_data->domain_data = &gpd_data->base;

        spin_unlock_irq(&dev->power.lock);

        return gpd_data;

 err_free:
        spin_unlock_irq(&dev->power.lock);
        kfree(gpd_data);
 err_put:
        dev_pm_put_subsys_data(dev);
        return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
                                struct generic_pm_domain_data *gpd_data)
{
        spin_lock_irq(&dev->power.lock);

        dev->power.subsys_data->domain_data = NULL;

        spin_unlock_irq(&dev->power.lock);

        kfree(gpd_data);
        dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
                                 int cpu, bool set, unsigned int depth)
{
        struct gpd_link *link;

        if (!genpd_is_cpu_domain(genpd))
                return;

        list_for_each_entry(link, &genpd->child_links, child_node) {
                struct generic_pm_domain *parent = link->parent;

                genpd_lock_nested(parent, depth + 1);
                genpd_update_cpumask(parent, cpu, set, depth + 1);
                genpd_unlock(parent);
        }

        if (set)
                cpumask_set_cpu(cpu, genpd->cpus);
        else
                cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
        if (cpu >= 0)
                genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
        if (cpu >= 0)
                genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
        int cpu;

        if (!genpd_is_cpu_domain(genpd))
                return -1;

        for_each_possible_cpu(cpu) {
                if (get_cpu_device(cpu) == dev)
                        return cpu;
        }

        return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                            struct device *base_dev)
{
        struct generic_pm_domain_data *gpd_data;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        gpd_data = genpd_alloc_dev_data(dev);
        if (IS_ERR(gpd_data))
                return PTR_ERR(gpd_data);

        gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

        ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
        if (ret)
                goto out;

        genpd_lock(genpd);

        genpd_set_cpumask(genpd, gpd_data->cpu);
        dev_pm_domain_set(dev, &genpd->domain);

        genpd->device_count++;
        genpd->max_off_time_changed = true;

        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

        genpd_unlock(genpd);
 out:
        if (ret)
                genpd_free_dev_data(dev, gpd_data);
        else
                dev_pm_qos_add_notifier(dev, &gpd_data->nb,
                                        DEV_PM_QOS_RESUME_LATENCY);

        return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
        int ret;

        mutex_lock(&gpd_list_lock);
        ret = genpd_add_device(genpd, dev, dev);
        mutex_unlock(&gpd_list_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);

static int genpd_remove_device(struct generic_pm_domain *genpd,
                               struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;
        struct pm_domain_data *pdd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        pdd = dev->power.subsys_data->domain_data;
        gpd_data = to_gpd_data(pdd);
        dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
                                   DEV_PM_QOS_RESUME_LATENCY);

        genpd_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        genpd->device_count--;
        genpd->max_off_time_changed = true;

        genpd_clear_cpumask(genpd, gpd_data->cpu);
        dev_pm_domain_set(dev, NULL);

        list_del_init(&pdd->list_node);

        genpd_unlock(genpd);

        if (genpd->detach_dev)
                genpd->detach_dev(genpd, dev);

        genpd_free_dev_data(dev, gpd_data);

        return 0;

 out:
        genpd_unlock(genpd);
        dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

        return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
        struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

        if (!genpd)
                return -EINVAL;

        return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
        struct generic_pm_domain *genpd;
        struct generic_pm_domain_data *gpd_data;
        int ret;

        genpd = dev_to_genpd_safe(dev);
        if (!genpd)
                return -ENODEV;

        if (WARN_ON(!dev->power.subsys_data ||
                    !dev->power.subsys_data->domain_data))
                return -EINVAL;

        gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        if (gpd_data->power_nb)
                return -EEXIST;

        genpd_lock(genpd);
        ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
        genpd_unlock(genpd);

        if (ret) {
                dev_warn(dev, "failed to add notifier for PM domain %s\n",
                         genpd->name);
                return ret;
        }

        gpd_data->power_nb = nb;
        return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
1766
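A hedged sketch of how a driver for an attached device might use the notifier helper above; the callback body is hypothetical, while the GENPD_NOTIFY_* actions come from <linux/pm_domain.h>. Unregistration is the symmetric dev_pm_genpd_remove_notifier(dev) call documented below.

#include <linux/notifier.h>
#include <linux/pm_domain.h>

static int my_pd_notify(struct notifier_block *nb, unsigned long action,
			void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* Save hardware context before the domain powers off. */
		break;
	case GENPD_NOTIFY_ON:
		/* Restore hardware context after the domain powered on. */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_pd_nb = { .notifier_call = my_pd_notify };

static int my_register_notifier(struct device *dev)
{
	/* @dev must already be attached to its genpd. */
	return dev_pm_genpd_add_notifier(dev, &my_pd_nb);
}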
1767/**
1768 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1769 *
1770 * @dev: Device that is associated with the notifier
1771 *
1772 * Users may call this function to remove a genpd power on/off notifier for an
1773 * attached @dev.
1774 *
1775 * It is assumed that the caller guarantees that the genpd will not be
1776 * detached while this routine is being called.
1777 *
1778 * Returns 0 on success and negative error values on failures.
1779 */
1780int dev_pm_genpd_remove_notifier(struct device *dev)
1781{
1782 struct generic_pm_domain *genpd;
1783 struct generic_pm_domain_data *gpd_data;
1784 int ret;
1785
1786 genpd = dev_to_genpd_safe(dev);
1787 if (!genpd)
1788 return -ENODEV;
1789
1790 if (WARN_ON(!dev->power.subsys_data ||
1791 !dev->power.subsys_data->domain_data))
1792 return -EINVAL;
1793
1794 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1795 if (!gpd_data->power_nb)
1796 return -ENODEV;
1797
1798 genpd_lock(genpd);
1799 ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1800 gpd_data->power_nb);
1801 genpd_unlock(genpd);
1802
1803 if (ret) {
1804 dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1805 genpd->name);
1806 return ret;
1807 }
1808
1809 gpd_data->power_nb = NULL;
1810 return 0;
1811}
1812EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1813
1814static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1815 struct generic_pm_domain *subdomain)
f721889f 1816{
2547923d 1817 struct gpd_link *link, *itr;
1818 int ret = 0;
1819
1820 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1821 || genpd == subdomain)
1822 return -EINVAL;
1823
1824 /*
1825 * If the domain can be powered on/off in an IRQ safe
1826 * context, ensure that the subdomain can also be
1827 * powered on/off in that context.
1828 */
1829 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
44cae7d5 1830 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1831 genpd->name, subdomain->name);
1832 return -EINVAL;
1833 }
1834
1835 link = kzalloc(sizeof(*link), GFP_KERNEL);
1836 if (!link)
1837 return -ENOMEM;
1838
1839 genpd_lock(subdomain);
1840 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
f721889f 1841
41e2c8e0 1842 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1843 ret = -EINVAL;
1844 goto out;
1845 }
1846
1847 list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1848 if (itr->child == subdomain && itr->parent == genpd) {
1849 ret = -EINVAL;
1850 goto out;
1851 }
1852 }
1853
1854 link->parent = genpd;
1855 list_add_tail(&link->parent_node, &genpd->parent_links);
1856 link->child = subdomain;
1857 list_add_tail(&link->child_node, &subdomain->child_links);
41e2c8e0 1858 if (genpd_status_on(subdomain))
c4bb3160 1859 genpd_sd_counter_inc(genpd);
f721889f 1860
f721889f 1861 out:
1862 genpd_unlock(genpd);
1863 genpd_unlock(subdomain);
1864 if (ret)
1865 kfree(link);
1866 return ret;
1867}
1868
1869/**
1870 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
8d87ae48 1871 * @genpd: Leader PM domain to add the subdomain to.
1872 * @subdomain: Subdomain to be added.
1873 */
1874int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1875 struct generic_pm_domain *subdomain)
1876{
1877 int ret;
1878
1879 mutex_lock(&gpd_list_lock);
1880 ret = genpd_add_subdomain(genpd, subdomain);
1881 mutex_unlock(&gpd_list_lock);
1882
1883 return ret;
1884}
d60ee966 1885EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
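For illustration, assuming both domains were initialized with pm_genpd_init(): linking them makes the child's power state depend on the parent. parent_pd and child_pd are hypothetical names.

static struct generic_pm_domain parent_pd, child_pd;

static int my_link_domains(void)
{
	/* child_pd can now only be powered on while parent_pd is on. */
	return pm_genpd_add_subdomain(&parent_pd, &child_pd);
}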
1886
1887/**
1888 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
8d87ae48 1889 * @genpd: Leader PM domain to remove the subdomain from.
5063ce15 1890 * @subdomain: Subdomain to be removed.
1891 */
1892int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
5063ce15 1893 struct generic_pm_domain *subdomain)
f721889f 1894{
c6e83cac 1895 struct gpd_link *l, *link;
1896 int ret = -EINVAL;
1897
5063ce15 1898 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1899 return -EINVAL;
1900
1901 genpd_lock(subdomain);
1902 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
f721889f 1903
8d87ae48 1904 if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1905 pr_warn("%s: unable to remove subdomain %s\n",
1906 genpd->name, subdomain->name);
1907 ret = -EBUSY;
1908 goto out;
1909 }
1910
1911 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1912 if (link->child != subdomain)
1913 continue;
1914
1915 list_del(&link->parent_node);
1916 list_del(&link->child_node);
5063ce15 1917 kfree(link);
41e2c8e0 1918 if (genpd_status_on(subdomain))
1919 genpd_sd_counter_dec(genpd);
1920
1921 ret = 0;
1922 break;
1923 }
1924
30e7a65b 1925out:
1926 genpd_unlock(genpd);
1927 genpd_unlock(subdomain);
1928
1929 return ret;
1930}
d60ee966 1931EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
f721889f 1932
1933static void genpd_free_default_power_state(struct genpd_power_state *states,
1934 unsigned int state_count)
1935{
1936 kfree(states);
1937}
1938
1939static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1940{
1941 struct genpd_power_state *state;
1942
1943 state = kzalloc(sizeof(*state), GFP_KERNEL);
1944 if (!state)
1945 return -ENOMEM;
1946
1947 genpd->states = state;
1948 genpd->state_count = 1;
49a27e27 1949 genpd->free_states = genpd_free_default_power_state;
1950
1951 return 0;
1952}
1953
1954static void genpd_lock_init(struct generic_pm_domain *genpd)
1955{
1956 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1957 spin_lock_init(&genpd->slock);
1958 genpd->lock_ops = &genpd_spin_ops;
1959 } else {
1960 mutex_init(&genpd->mlock);
1961 genpd->lock_ops = &genpd_mtx_ops;
1962 }
1963}
1964
1965/**
1966 * pm_genpd_init - Initialize a generic I/O PM domain object.
1967 * @genpd: PM domain object to initialize.
1968 * @gov: PM domain governor to associate with the domain (may be NULL).
1969 * @is_off: Initial value of the domain's power_is_off field.
1970 *
1971 * Returns 0 on successful initialization, else a negative error code.
f721889f 1972 */
1973int pm_genpd_init(struct generic_pm_domain *genpd,
1974 struct dev_power_governor *gov, bool is_off)
f721889f 1975{
1976 int ret;
1977
f721889f 1978 if (IS_ERR_OR_NULL(genpd))
7eb231c3 1979 return -EINVAL;
f721889f 1980
1981 INIT_LIST_HEAD(&genpd->parent_links);
1982 INIT_LIST_HEAD(&genpd->child_links);
f721889f 1983 INIT_LIST_HEAD(&genpd->dev_list);
d4f81383 1984 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
d716f479 1985 genpd_lock_init(genpd);
1986 genpd->gov = gov;
1987 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
c4bb3160 1988 atomic_set(&genpd->sd_count, 0);
49f618e1 1989 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
596ba34b 1990 genpd->device_count = 0;
221e9b58 1991 genpd->max_off_time_ns = -1;
6ff7bb0d 1992 genpd->max_off_time_changed = true;
1993 genpd->provider = NULL;
1994 genpd->has_provider = false;
bd40cbb0 1995 genpd->accounting_time = ktime_get_mono_fast_ns();
1996 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1997 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1998 genpd->domain.ops.prepare = genpd_prepare;
1999 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2000 genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2001 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2002 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2003 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2004 genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2005 genpd->domain.ops.complete = genpd_complete;
ea71c596 2006 genpd->domain.start = genpd_dev_pm_start;
2007
2008 if (genpd->flags & GENPD_FLAG_PM_CLK) {
2009 genpd->dev_ops.stop = pm_clk_suspend;
2010 genpd->dev_ops.start = pm_clk_resume;
2011 }
2012
2013 /* The always-on governor works better with the corresponding flag. */
2014 if (gov == &pm_domain_always_on_gov)
2015 genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2016
ffaa42e8 2017 /* Always-on domains must be powered on at initialization. */
2018 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2019 !genpd_status_on(genpd))
2020 return -EINVAL;
2021
2022 if (genpd_is_cpu_domain(genpd) &&
2023 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2024 return -ENOMEM;
2025
fc5cbf0c 2026 /* Use only one "off" state if there were no states declared */
2027 if (genpd->state_count == 0) {
2028 ret = genpd_set_default_power_state(genpd);
2029 if (ret) {
2030 if (genpd_is_cpu_domain(genpd))
2031 free_cpumask_var(genpd->cpus);
59d65b73 2032 return ret;
eb594b73 2033 }
46b7fe94 2034 } else if (!gov && genpd->state_count > 1) {
7a5bd127 2035 pr_warn("%s: no governor for states\n", genpd->name);
59d65b73 2036 }
fc5cbf0c 2037
2038 device_initialize(&genpd->dev);
2039 dev_set_name(&genpd->dev, "%s", genpd->name);
2040
2041 mutex_lock(&gpd_list_lock);
2042 list_add(&genpd->gpd_list_node, &gpd_list);
2043 mutex_unlock(&gpd_list_lock);
40ba55e4 2044 genpd_debug_add(genpd);
2045
2046 return 0;
5125bbf3 2047}
be5ed55d 2048EXPORT_SYMBOL_GPL(pm_genpd_init);
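A minimal sketch of a provider using pm_genpd_init(); the domain name and the callback bodies are hypothetical, and a real implementation would toggle the hardware power island (regulators, clocks, resets) in them.

#include <linux/pm_domain.h>

static int my_pd_power_on(struct generic_pm_domain *domain)
{
	/* Enable the power island here. */
	return 0;
}

static int my_pd_power_off(struct generic_pm_domain *domain)
{
	/* Disable the power island here. */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name = "my_pd",
	.power_on = my_pd_power_on,
	.power_off = my_pd_power_off,
};

static int __init my_pd_setup(void)
{
	/* No governor, domain starts in the off state. */
	return pm_genpd_init(&my_pd, NULL, true);
}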
aa42240a 2049
2050static int genpd_remove(struct generic_pm_domain *genpd)
2051{
2052 struct gpd_link *l, *link;
2053
2054 if (IS_ERR_OR_NULL(genpd))
2055 return -EINVAL;
2056
35241d12 2057 genpd_lock(genpd);
2058
2059 if (genpd->has_provider) {
35241d12 2060 genpd_unlock(genpd);
2061 pr_err("Provider present, unable to remove %s\n", genpd->name);
2062 return -EBUSY;
2063 }
2064
8d87ae48 2065 if (!list_empty(&genpd->parent_links) || genpd->device_count) {
35241d12 2066 genpd_unlock(genpd);
2067 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2068 return -EBUSY;
2069 }
2070
2071 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2072 list_del(&link->parent_node);
2073 list_del(&link->child_node);
2074 kfree(link);
2075 }
2076
2077 list_del(&genpd->gpd_list_node);
35241d12 2078 genpd_unlock(genpd);
f6bfe8b5 2079 genpd_debug_remove(genpd);
3fe57710 2080 cancel_work_sync(&genpd->power_off_work);
2081 if (genpd_is_cpu_domain(genpd))
2082 free_cpumask_var(genpd->cpus);
2083 if (genpd->free_states)
2084 genpd->free_states(genpd->states, genpd->state_count);
2085
2086 pr_debug("%s: removed %s\n", __func__, genpd->name);
2087
2088 return 0;
2089}
2090
2091/**
2092 * pm_genpd_remove - Remove a generic I/O PM domain
2093 * @genpd: Pointer to PM domain that is to be removed.
2094 *
2095 * To remove the PM domain, this function:
2096 * - Removes the PM domain as a subdomain to any parent domains,
2097 * if it was added.
2098 * - Removes the PM domain from the list of registered PM domains.
2099 *
2100 * The PM domain will only be removed if the associated provider has
2101 * been removed, it is not a parent of any other PM domain, and it has
2102 * no devices associated with it.
2103 */
2104int pm_genpd_remove(struct generic_pm_domain *genpd)
2105{
2106 int ret;
2107
2108 mutex_lock(&gpd_list_lock);
2109 ret = genpd_remove(genpd);
2110 mutex_unlock(&gpd_list_lock);
2111
2112 return ret;
2113}
2114EXPORT_SYMBOL_GPL(pm_genpd_remove);
2115
aa42240a 2116#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
892ebdcc 2117
2118/*
2119 * Device Tree based PM domain providers.
2120 *
2121 * The code below implements generic device tree based PM domain providers that
2122 * bind device tree nodes with generic PM domains registered in the system.
2123 *
2124 * Any driver that registers generic PM domains and needs to support binding of
2125 * devices to these domains is supposed to register a PM domain provider, which
2126 * maps a PM domain specifier retrieved from the device tree to a PM domain.
2127 *
2128 * Two simple mapping functions have been provided for convenience:
2129 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2130 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2131 * index.
2132 */
2133
2134/**
2135 * struct of_genpd_provider - PM domain provider registration structure
2136 * @link: Entry in global list of PM domain providers
2137 * @node: Pointer to device tree node of PM domain provider
2138 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2139 * into a PM domain.
2140 * @data: context pointer to be passed into @xlate callback
2141 */
2142struct of_genpd_provider {
2143 struct list_head link;
2144 struct device_node *node;
2145 genpd_xlate_t xlate;
2146 void *data;
2147};
2148
2149/* List of registered PM domain providers. */
2150static LIST_HEAD(of_genpd_providers);
2151/* Mutex to protect the list above. */
2152static DEFINE_MUTEX(of_genpd_mutex);
2153
2154/**
892ebdcc 2155 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2156 * @genpdspec: OF phandle args to map into a PM domain
2157 * @data: xlate function private data - pointer to struct generic_pm_domain
2158 *
2159 * This is a generic xlate function that can be used to model PM domains that
2160 * have their own device tree nodes. The private data of the xlate function needs
2161 * to be a valid pointer to struct generic_pm_domain.
2162 */
892ebdcc 2163static struct generic_pm_domain *genpd_xlate_simple(
2164 struct of_phandle_args *genpdspec,
2165 void *data)
2166{
2167 return data;
2168}
2169
2170/**
892ebdcc 2171 * genpd_xlate_onecell() - Xlate function using a single index.
2172 * @genpdspec: OF phandle args to map into a PM domain
2173 * @data: xlate function private data - pointer to struct genpd_onecell_data
2174 *
2175 * This is a generic xlate function that can be used to model simple PM domain
2176 * controllers that have one device tree node and provide multiple PM domains.
2177 * A single cell is used as an index into an array of PM domains specified in
2178 * the genpd_onecell_data struct when registering the provider.
2179 */
892ebdcc 2180static struct generic_pm_domain *genpd_xlate_onecell(
2181 struct of_phandle_args *genpdspec,
2182 void *data)
2183{
2184 struct genpd_onecell_data *genpd_data = data;
2185 unsigned int idx = genpdspec->args[0];
2186
2187 if (genpdspec->args_count != 1)
2188 return ERR_PTR(-EINVAL);
2189
2190 if (idx >= genpd_data->num_domains) {
2191 pr_err("%s: invalid domain index %u\n", __func__, idx);
2192 return ERR_PTR(-EINVAL);
2193 }
2194
2195 if (!genpd_data->domains[idx])
2196 return ERR_PTR(-ENOENT);
2197
2198 return genpd_data->domains[idx];
2199}
2200
2201/**
892ebdcc 2202 * genpd_add_provider() - Register a PM domain provider for a node
2203 * @np: Device node pointer associated with the PM domain provider.
2204 * @xlate: Callback for decoding PM domain from phandle arguments.
2205 * @data: Context pointer for @xlate callback.
2206 */
2207static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2208 void *data)
2209{
2210 struct of_genpd_provider *cp;
2211
2212 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2213 if (!cp)
2214 return -ENOMEM;
2215
2216 cp->node = of_node_get(np);
2217 cp->data = data;
2218 cp->xlate = xlate;
bab2d712 2219 fwnode_dev_initialized(&np->fwnode, true);
2220
2221 mutex_lock(&of_genpd_mutex);
2222 list_add(&cp->link, &of_genpd_providers);
2223 mutex_unlock(&of_genpd_mutex);
ea11e94b 2224 pr_debug("Added domain provider from %pOF\n", np);
2225
2226 return 0;
2227}
892ebdcc 2228
2229static bool genpd_present(const struct generic_pm_domain *genpd)
2230{
40ba55e4 2231 bool ret = false;
2232 const struct generic_pm_domain *gpd;
2233
2234 mutex_lock(&gpd_list_lock);
2235 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2236 if (gpd == genpd) {
2237 ret = true;
2238 break;
2239 }
2240 }
2241 mutex_unlock(&gpd_list_lock);
2242
2243 return ret;
2244}
2245
2246/**
2247 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2248 * @np: Device node pointer associated with the PM domain provider.
2249 * @genpd: Pointer to PM domain associated with the PM domain provider.
2250 */
2251int of_genpd_add_provider_simple(struct device_node *np,
2252 struct generic_pm_domain *genpd)
2253{
40ba55e4 2254 int ret;
2255
2256 if (!np || !genpd)
2257 return -EINVAL;
2258
6a0ae73d 2259 if (!genpd_present(genpd))
40ba55e4 2260 return -EINVAL;
2261
2262 genpd->dev.of_node = np;
2263
2264 /* Parse genpd OPP table */
2265 if (genpd->set_performance_state) {
2266 ret = dev_pm_opp_of_add_table(&genpd->dev);
2267 if (ret)
2268 return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2269
2270 /*
2271 * Save table for faster processing while setting performance
2272 * state.
2273 */
2274 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
dd461cd9 2275 WARN_ON(IS_ERR(genpd->opp_table));
2276 }
2277
2278 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2279 if (ret) {
2280 if (genpd->set_performance_state) {
2281 dev_pm_opp_put_opp_table(genpd->opp_table);
6a0ae73d 2282 dev_pm_opp_of_remove_table(&genpd->dev);
1067ae3e 2283 }
6a0ae73d 2284
40ba55e4 2285 return ret;
2286 }
2287
2288 genpd->provider = &np->fwnode;
2289 genpd->has_provider = true;
2290
40ba55e4 2291 return 0;
2292}
2293EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
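A sketch of a DT-based provider driver publishing a single domain; my_pd and my_provider_probe are hypothetical names, and error unwinding is trimmed for brevity.

static int my_provider_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_genpd_init(&my_pd, NULL, false);
	if (ret)
		return ret;

	/* Consumers reference the domain via a power-domains phandle. */
	return of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
}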
2294
2295/**
2296 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2297 * @np: Device node pointer associated with the PM domain provider.
2298 * @data: Pointer to the data associated with the PM domain provider.
2299 */
2300int of_genpd_add_provider_onecell(struct device_node *np,
2301 struct genpd_onecell_data *data)
2302{
6a0ae73d 2303 struct generic_pm_domain *genpd;
0159ec67 2304 unsigned int i;
de0aa06d 2305 int ret = -EINVAL;
2306
2307 if (!np || !data)
2308 return -EINVAL;
2309
2310 if (!data->xlate)
2311 data->xlate = genpd_xlate_onecell;
2312
0159ec67 2313 for (i = 0; i < data->num_domains; i++) {
2314 genpd = data->domains[i];
2315
2316 if (!genpd)
609bed67 2317 continue;
6a0ae73d 2318 if (!genpd_present(genpd))
2319 goto error;
2320
2321 genpd->dev.of_node = np;
2322
2323 /* Parse genpd OPP table */
2324 if (genpd->set_performance_state) {
2325 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2326 if (ret) {
2327 dev_err_probe(&genpd->dev, ret,
2328 "Failed to add OPP table for index %d\n", i);
2329 goto error;
2330 }
2331
2332 /*
2333 * Save table for faster processing while setting
2334 * performance state.
2335 */
e77dcb0b 2336 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
dd461cd9 2337 WARN_ON(IS_ERR(genpd->opp_table));
2338 }
2339
2340 genpd->provider = &np->fwnode;
2341 genpd->has_provider = true;
2342 }
2343
40845524 2344 ret = genpd_add_provider(np, data->xlate, data);
2345 if (ret < 0)
2346 goto error;
2347
2348 return 0;
2349
2350error:
2351 while (i--) {
2352 genpd = data->domains[i];
2353
2354 if (!genpd)
609bed67 2355 continue;
2356
2357 genpd->provider = NULL;
2358 genpd->has_provider = false;
2359
2360 if (genpd->set_performance_state) {
2361 dev_pm_opp_put_opp_table(genpd->opp_table);
6a0ae73d 2362 dev_pm_opp_of_remove_table(&genpd->dev);
1067ae3e 2363 }
de0aa06d 2364 }
0159ec67 2365
0159ec67 2366 return ret;
2367}
2368EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
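An illustrative onecell provider with two hypothetical domains; each generic_pm_domain must have been registered with pm_genpd_init() before the provider is added, and consumers then select a domain by index, e.g. <&provider 1>.

static struct generic_pm_domain pd_a, pd_b;

static struct generic_pm_domain *my_domains[] = { &pd_a, &pd_b };

static struct genpd_onecell_data my_onecell_data = {
	.domains = my_domains,
	.num_domains = ARRAY_SIZE(my_domains),
};

static int my_onecell_probe(struct platform_device *pdev)
{
	/* Index 0 maps to pd_a, index 1 to pd_b via genpd_xlate_onecell(). */
	return of_genpd_add_provider_onecell(pdev->dev.of_node,
					     &my_onecell_data);
}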
2369
2370/**
2371 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2372 * @np: Device node pointer associated with the PM domain provider
2373 */
2374void of_genpd_del_provider(struct device_node *np)
2375{
b556b15d 2376 struct of_genpd_provider *cp, *tmp;
de0aa06d 2377 struct generic_pm_domain *gpd;
aa42240a 2378
de0aa06d 2379 mutex_lock(&gpd_list_lock);
aa42240a 2380 mutex_lock(&of_genpd_mutex);
b556b15d 2381 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
aa42240a 2382 if (cp->node == np) {
2383 /*
2384 * For each PM domain associated with the
2385 * provider, set the 'has_provider' to false
2386 * so that the PM domain can be safely removed.
2387 */
2388 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2389 if (gpd->provider == &np->fwnode) {
2390 gpd->has_provider = false;
2391
2392 if (!gpd->set_performance_state)
2393 continue;
2394
1067ae3e 2395 dev_pm_opp_put_opp_table(gpd->opp_table);
2396 dev_pm_opp_of_remove_table(&gpd->dev);
2397 }
2398 }
2399
bab2d712 2400 fwnode_dev_initialized(&cp->node->fwnode, false);
2401 list_del(&cp->link);
2402 of_node_put(cp->node);
2403 kfree(cp);
2404 break;
2405 }
2406 }
2407 mutex_unlock(&of_genpd_mutex);
de0aa06d 2408 mutex_unlock(&gpd_list_lock);
2409}
2410EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2411
2412/**
f58d4e5a 2413 * genpd_get_from_provider() - Look-up PM domain
2414 * @genpdspec: OF phandle args to use for look-up
2415 *
2416 * Looks for a PM domain provider under the node specified by @genpdspec and if
2417 * found, uses the provider's xlate function to map the phandle args to a PM
2418 * domain.
2419 *
2420 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2421 * on failure.
2422 */
f58d4e5a 2423static struct generic_pm_domain *genpd_get_from_provider(
2424 struct of_phandle_args *genpdspec)
2425{
2426 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2427 struct of_genpd_provider *provider;
2428
2429 if (!genpdspec)
2430 return ERR_PTR(-EINVAL);
2431
2432 mutex_lock(&of_genpd_mutex);
2433
2434 /* Check if we have such a provider in our array */
2435 list_for_each_entry(provider, &of_genpd_providers, link) {
2436 if (provider->node == genpdspec->np)
2437 genpd = provider->xlate(genpdspec, provider->data);
2438 if (!IS_ERR(genpd))
2439 break;
2440 }
2441
2442 mutex_unlock(&of_genpd_mutex);
2443
2444 return genpd;
2445}
2446
2447/**
2448 * of_genpd_add_device() - Add a device to an I/O PM domain
2449 * @genpdspec: OF phandle args to use for look-up PM domain
2450 * @dev: Device to be added.
2451 *
2452 * Looks up an I/O PM domain based upon the phandle args provided and adds
2453 * the device to the PM domain. Returns a negative error code on failure.
2454 */
2455int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2456{
2457 struct generic_pm_domain *genpd;
2458 int ret;
2459
2460 mutex_lock(&gpd_list_lock);
ec69572b 2461
f58d4e5a 2462 genpd = genpd_get_from_provider(genpdspec);
2463 if (IS_ERR(genpd)) {
2464 ret = PTR_ERR(genpd);
2465 goto out;
2466 }
2467
f9ccd7c3 2468 ret = genpd_add_device(genpd, dev, dev);
ec69572b 2469
2470out:
2471 mutex_unlock(&gpd_list_lock);
2472
2473 return ret;
2474}
2475EXPORT_SYMBOL_GPL(of_genpd_add_device);
2476
2477/**
2478 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2479 * @parent_spec: OF phandle args to use for parent PM domain look-up
2480 * @subdomain_spec: OF phandle args to use for subdomain look-up
2481 *
2482 * Looks up a parent PM domain and subdomain based upon the phandle args
2483 * provided and adds the subdomain to the parent PM domain. Returns a
2484 * negative error code on failure.
2485 */
2486int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2487 struct of_phandle_args *subdomain_spec)
2488{
2489 struct generic_pm_domain *parent, *subdomain;
2490 int ret;
2491
2492 mutex_lock(&gpd_list_lock);
ec69572b 2493
f58d4e5a 2494 parent = genpd_get_from_provider(parent_spec);
19efa5ff
JH
2495 if (IS_ERR(parent)) {
2496 ret = PTR_ERR(parent);
2497 goto out;
2498 }
ec69572b 2499
f58d4e5a 2500 subdomain = genpd_get_from_provider(subdomain_spec);
2501 if (IS_ERR(subdomain)) {
2502 ret = PTR_ERR(subdomain);
2503 goto out;
2504 }
2505
2506 ret = genpd_add_subdomain(parent, subdomain);
ec69572b 2507
2508out:
2509 mutex_unlock(&gpd_list_lock);
2510
18027d6f 2511 return ret == -ENOENT ? -EPROBE_DEFER : ret;
2512}
2513EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2514
2515/**
2516 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2517 * @parent_spec: OF phandle args to use for parent PM domain look-up
2518 * @subdomain_spec: OF phandle args to use for subdomain look-up
2519 *
2520 * Looks up a parent PM domain and subdomain based upon the phandle args
2521 * provided and removes the subdomain from the parent PM domain. Returns a
2522 * negative error code on failure.
2523 */
2524int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2525 struct of_phandle_args *subdomain_spec)
2526{
2527 struct generic_pm_domain *parent, *subdomain;
2528 int ret;
2529
2530 mutex_lock(&gpd_list_lock);
2531
2532 parent = genpd_get_from_provider(parent_spec);
2533 if (IS_ERR(parent)) {
2534 ret = PTR_ERR(parent);
2535 goto out;
2536 }
2537
2538 subdomain = genpd_get_from_provider(subdomain_spec);
2539 if (IS_ERR(subdomain)) {
2540 ret = PTR_ERR(subdomain);
2541 goto out;
2542 }
2543
2544 ret = pm_genpd_remove_subdomain(parent, subdomain);
2545
2546out:
2547 mutex_unlock(&gpd_list_lock);
2548
2549 return ret;
2550}
2551EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2552
2553/**
2554 * of_genpd_remove_last - Remove the last PM domain registered for a provider
763663c9 2555 * @np: Pointer to device node associated with provider
2556 *
2557 * Find the last PM domain that was added by a particular provider and
2558 * remove this PM domain from the list of PM domains. The provider is
2559 * identified by the device node @np that is passed. The PM
2560 * domain will only be removed if the provider associated with the domain
2561 * has been removed.
2562 *
2563 * Returns a valid pointer to struct generic_pm_domain on success or
2564 * ERR_PTR() on failure.
2565 */
2566struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2567{
a7e2d1bc 2568 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2569 int ret;
2570
2571 if (IS_ERR_OR_NULL(np))
2572 return ERR_PTR(-EINVAL);
2573
2574 mutex_lock(&gpd_list_lock);
a7e2d1bc 2575 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2576 if (gpd->provider == &np->fwnode) {
2577 ret = genpd_remove(gpd);
2578 genpd = ret ? ERR_PTR(ret) : gpd;
2579 break;
2580 }
2581 }
2582 mutex_unlock(&gpd_list_lock);
2583
2584 return genpd;
2585}
2586EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2587
2588static void genpd_release_dev(struct device *dev)
2589{
e8b04de9 2590 of_node_put(dev->of_node);
2591 kfree(dev);
2592}
2593
2594static struct bus_type genpd_bus_type = {
2595 .name = "genpd",
2596};
2597
2598/**
2599 * genpd_dev_pm_detach - Detach a device from its PM domain.
8bb6944e 2600 * @dev: Device to detach.
2601 * @power_off: Currently not used
2602 *
2603 * Try to locate a corresponding generic PM domain, which the device was
2604 * attached to previously. If one is found, the device is detached from it.
2605 */
2606static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2607{
446d999c 2608 struct generic_pm_domain *pd;
93af5e93 2609 unsigned int i;
2610 int ret = 0;
2611
2612 pd = dev_to_genpd(dev);
2613 if (IS_ERR(pd))
2614 return;
2615
2616 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2617
2618 /* Drop the default performance state */
2619 if (dev_gpd_data(dev)->default_pstate) {
2620 dev_pm_genpd_set_performance_state(dev, 0);
2621 dev_gpd_data(dev)->default_pstate = 0;
2622 }
2623
93af5e93 2624 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
85168d56 2625 ret = genpd_remove_device(pd, dev);
2626 if (ret != -EAGAIN)
2627 break;
2628
2629 mdelay(i);
2630 cond_resched();
2631 }
2632
2633 if (ret < 0) {
2634 dev_err(dev, "failed to remove from PM domain %s: %d\n",
2635 pd->name, ret);
2636 return;
2637 }
2638
2639 /* Check if PM domain can be powered off after removing this device. */
2640 genpd_queue_power_off_work(pd);
2641
2642 /* Unregister the device if it was created by genpd. */
2643 if (dev->bus == &genpd_bus_type)
2644 device_unregister(dev);
2645}
2646
2647static void genpd_dev_pm_sync(struct device *dev)
2648{
2649 struct generic_pm_domain *pd;
2650
2651 pd = dev_to_genpd(dev);
2652 if (IS_ERR(pd))
2653 return;
2654
2655 genpd_queue_power_off_work(pd);
2656}
2657
2658static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2659 unsigned int index, bool power_on)
2660{
2661 struct of_phandle_args pd_args;
2662 struct generic_pm_domain *pd;
c016baf7 2663 int pstate;
aa42240a
TF
2664 int ret;
2665
e8b04de9 2666 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
8cb1cbd6 2667 "#power-domain-cells", index, &pd_args);
001d50c9 2668 if (ret < 0)
bcd931f2 2669 return ret;
aa42240a 2670
19efa5ff 2671 mutex_lock(&gpd_list_lock);
f58d4e5a 2672 pd = genpd_get_from_provider(&pd_args);
265e2cf6 2673 of_node_put(pd_args.np);
aa42240a 2674 if (IS_ERR(pd)) {
19efa5ff 2675 mutex_unlock(&gpd_list_lock);
2676 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2677 __func__, PTR_ERR(pd));
51dcf748 2678 return driver_deferred_probe_check_state(base_dev);
2679 }
2680
2681 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2682
f9ccd7c3 2683 ret = genpd_add_device(pd, dev, base_dev);
19efa5ff 2684 mutex_unlock(&gpd_list_lock);
aa42240a 2685
2686 if (ret < 0)
2687 return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2688
2689 dev->pm_domain->detach = genpd_dev_pm_detach;
632f7ce3 2690 dev->pm_domain->sync = genpd_dev_pm_sync;
aa42240a 2691
2692 if (power_on) {
2693 genpd_lock(pd);
2694 ret = genpd_power_on(pd, 0);
2695 genpd_unlock(pd);
2696 }
72038df3 2697
c016baf7 2698 if (ret) {
72038df3 2699 genpd_remove_device(pd, dev);
2700 return -EPROBE_DEFER;
2701 }
919b7308 2702
2703 /* Set the default performance state */
2704 pstate = of_get_required_opp_performance_state(dev->of_node, index);
65616418 2705 if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
c016baf7
RN
2706 ret = pstate;
2707 goto err;
2708 } else if (pstate > 0) {
2709 ret = dev_pm_genpd_set_performance_state(dev, pstate);
2710 if (ret)
2711 goto err;
2712 dev_gpd_data(dev)->default_pstate = pstate;
2713 }
2714 return 1;
2715
2716err:
2717 dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2718 pd->name, ret);
2719 genpd_remove_device(pd, dev);
2720 return ret;
aa42240a 2721}
2722
2723/**
2724 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2725 * @dev: Device to attach.
2726 *
2727 * Parses the device's OF node to find a PM domain specifier. If one is found,
2728 * attaches the device to the retrieved pm_domain ops.
2729 *
2730 * Returns 1 on a successfully attached PM domain, 0 when the device doesn't need a
2731 * PM domain or when multiple power-domains exist for it, else a negative error
2732 * code. Note that if a power-domain exists for the device, but it cannot be
2733 * found or turned on, then return -EPROBE_DEFER to ensure that the device is
2734 * not probed and to re-try again later.
2735 */
2736int genpd_dev_pm_attach(struct device *dev)
2737{
2738 if (!dev->of_node)
2739 return 0;
2740
2741 /*
2742 * Devices with multiple PM domains must be attached separately, as we
2743 * can only attach one PM domain per device.
2744 */
2745 if (of_count_phandle_with_args(dev->of_node, "power-domains",
2746 "#power-domain-cells") != 1)
2747 return 0;
2748
51dcf748 2749 return __genpd_dev_pm_attach(dev, dev, 0, true);
8cb1cbd6 2750}
aa42240a 2751EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
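A hedged sketch of a bus-level caller; in practice this path is normally reached through dev_pm_domain_attach() during probe, and my_bus_probe is a hypothetical name.

static int my_bus_probe(struct device *dev)
{
	int ret = genpd_dev_pm_attach(dev);

	if (ret < 0)
		return ret; /* often -EPROBE_DEFER until the domain appears */

	/* ret == 0: no PM domain needed; ret == 1: successfully attached. */
	return 0;
}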
30f60428 2752
2753/**
2754 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2755 * @dev: The device used to lookup the PM domain.
2756 * @index: The index of the PM domain.
2757 *
2758 * Parse device's OF node to find a PM domain specifier at the provided @index.
2759 * If one is found, creates a virtual device and attaches it to the retrieved
2760 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2761 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2762 *
2763 * Returns the created virtual device on a successfully attached PM domain, NULL
2764 * when the device doesn't need a PM domain, else an ERR_PTR() in case of
2765 * failures. If a power-domain exists for the device, but cannot be found or
2766 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2767 * is not probed and to re-try again later.
2768 */
2769struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2770 unsigned int index)
2771{
560928b2 2772 struct device *virt_dev;
3c095f32
UH
2773 int num_domains;
2774 int ret;
2775
2776 if (!dev->of_node)
2777 return NULL;
2778
3ccf3f0c 2779 /* Verify that the index is within a valid range. */
2780 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2781 "#power-domain-cells");
3ccf3f0c 2782 if (index >= num_domains)
3c095f32
UH
2783 return NULL;
2784
2785 /* Allocate and register device on the genpd bus. */
2786 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2787 if (!virt_dev)
3c095f32
UH
2788 return ERR_PTR(-ENOMEM);
2789
2790 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2791 virt_dev->bus = &genpd_bus_type;
2792 virt_dev->release = genpd_release_dev;
e8b04de9 2793 virt_dev->of_node = of_node_get(dev->of_node);
3c095f32 2794
560928b2 2795 ret = device_register(virt_dev);
3c095f32 2796 if (ret) {
71b77697 2797 put_device(virt_dev);
2798 return ERR_PTR(ret);
2799 }
2800
2801 /* Try to attach the device to the PM domain at the specified index. */
51dcf748 2802 ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
3c095f32 2803 if (ret < 1) {
560928b2 2804 device_unregister(virt_dev);
2805 return ret ? ERR_PTR(ret) : NULL;
2806 }
2807
2808 pm_runtime_enable(virt_dev);
2809 genpd_queue_power_off_work(dev_to_genpd(virt_dev));
3c095f32 2810
560928b2 2811 return virt_dev;
2812}
2813EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
2814
2815/**
2816 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2817 * @dev: The device used to lookup the PM domain.
2818 * @name: The name of the PM domain.
2819 *
2820 * Parse device's OF node to find a PM domain specifier using the
2821 * power-domain-names DT property. For further description see
2822 * genpd_dev_pm_attach_by_id().
2823 */
7416f1f2 2824struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2825{
2826 int index;
2827
2828 if (!dev->of_node)
2829 return NULL;
2830
2831 index = of_property_match_string(dev->of_node, "power-domain-names",
2832 name);
2833 if (index < 0)
2834 return NULL;
2835
2836 return genpd_dev_pm_attach_by_id(dev, index);
2837}
2838
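An illustrative consumer with multiple domains, assuming its DT node carries power-domains entries plus a power-domain-names list containing "perf"; runtime PM calls then target the returned virtual device. my_consumer_probe is a hypothetical name.

static int my_consumer_probe(struct device *dev)
{
	struct device *perf_dev;

	perf_dev = genpd_dev_pm_attach_by_name(dev, "perf");
	if (IS_ERR(perf_dev))
		return PTR_ERR(perf_dev);
	if (!perf_dev)
		return -ENODEV; /* no "perf" entry described in DT */

	/* Runtime PM on perf_dev now controls the "perf" domain. */
	return 0;
}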
30f60428 2839static const struct of_device_id idle_state_match[] = {
598da548 2840 { .compatible = "domain-idle-state", },
2841 { }
2842};
2843
2844static int genpd_parse_state(struct genpd_power_state *genpd_state,
2845 struct device_node *state_node)
2846{
2847 int err;
2848 u32 residency;
2849 u32 entry_latency, exit_latency;
2850
2851 err = of_property_read_u32(state_node, "entry-latency-us",
2852 &entry_latency);
2853 if (err) {
ea11e94b 2854 pr_debug(" * %pOF missing entry-latency-us property\n",
7a5bd127 2855 state_node);
2856 return -EINVAL;
2857 }
2858
2859 err = of_property_read_u32(state_node, "exit-latency-us",
2860 &exit_latency);
2861 if (err) {
ea11e94b 2862 pr_debug(" * %pOF missing exit-latency-us property\n",
7a5bd127 2863 state_node);
2864 return -EINVAL;
2865 }
2866
2867 err = of_property_read_u32(state_node, "min-residency-us", &residency);
2868 if (!err)
2869 genpd_state->residency_ns = 1000 * residency;
2870
2871 genpd_state->power_on_latency_ns = 1000 * exit_latency;
2872 genpd_state->power_off_latency_ns = 1000 * entry_latency;
0c9b694a 2873 genpd_state->fwnode = &state_node->fwnode;
2874
2875 return 0;
2876}
2877
2878static int genpd_iterate_idle_states(struct device_node *dn,
2879 struct genpd_power_state *states)
2880{
2881 int ret;
2882 struct of_phandle_iterator it;
2883 struct device_node *np;
2884 int i = 0;
2885
2886 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2887 if (ret <= 0)
56cb2689 2888 return ret == -ENOENT ? 0 : ret;
2889
2890 /* Loop over the phandles until all the requested entries are found */
2891 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2892 np = it.node;
2893 if (!of_match_node(idle_state_match, np))
2894 continue;
2895 if (states) {
2896 ret = genpd_parse_state(&states[i], np);
2897 if (ret) {
2898 pr_err("Parsing idle state node %pOF failed with err %d\n",
2899 np, ret);
2900 of_node_put(np);
2901 return ret;
2902 }
2903 }
2904 i++;
2905 }
2906
2907 return i;
2908}
2909
2910/**
2911 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2912 *
2913 * @dn: The genpd device node
2914 * @states: The pointer to which the state array will be saved.
2915 * @n: The count of elements in the array returned from this function.
2916 *
2917 * Returns the device states parsed from the OF node. The memory for the states
2918 * is allocated by this function, and it is the responsibility of the caller to
2919 * free the memory after use. If any or zero compatible domain idle states are
2920 * found, it returns 0; in case of errors, a negative error code is returned.
2921 */
2922int of_genpd_parse_idle_states(struct device_node *dn,
2923 struct genpd_power_state **states, int *n)
2924{
2925 struct genpd_power_state *st;
a3381e3a 2926 int ret;
30f60428 2927
a3381e3a 2928 ret = genpd_iterate_idle_states(dn, NULL);
2929 if (ret < 0)
2930 return ret;
2931
2932 if (!ret) {
2933 *states = NULL;
2934 *n = 0;
2935 return 0;
2936 }
30f60428 2937
a3381e3a 2938 st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2939 if (!st)
2940 return -ENOMEM;
2941
2942 ret = genpd_iterate_idle_states(dn, st);
2943 if (ret <= 0) {
2944 kfree(st);
2945 return ret < 0 ? ret : -EINVAL;
2946 }
2947
2948 *states = st;
2949 *n = ret;
2950
2951 return 0;
2952}
2953EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
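A sketch of a provider consuming the helper above before registering its domain; np and my_pd are hypothetical names, and per the comment the caller owns the returned array.

static int my_pd_setup_states(struct device_node *np)
{
	struct genpd_power_state *states;
	int nr_states, ret;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	if (nr_states) {
		/* Hand the parsed states to the domain before pm_genpd_init(). */
		my_pd.states = states;
		my_pd.state_count = nr_states;
	}
	return 0;
}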
2954
2955/**
2956 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2957 *
2958 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2959 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2960 * state.
2961 *
2962 * Returns performance state encoded in the OPP of the genpd. This calls
2963 * platform specific genpd->opp_to_performance_state() callback to translate
2964 * power domain OPP to performance state.
2965 *
2966 * Returns performance state on success and 0 on failure.
2967 */
2968unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2969 struct dev_pm_opp *opp)
2970{
2971 struct generic_pm_domain *genpd = NULL;
2972 int state;
2973
2974 genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2975
2976 if (unlikely(!genpd->opp_to_performance_state))
2977 return 0;
2978
2979 genpd_lock(genpd);
2980 state = genpd->opp_to_performance_state(genpd, opp);
2981 genpd_unlock(genpd);
2982
2983 return state;
2984}
2985EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
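For illustration, a provider's ->opp_to_performance_state() hook could simply return the level encoded in the OPP; dev_pm_opp_get_level() is the generic accessor, and my_pd_opp_to_pstate is a hypothetical name.

#include <linux/pm_opp.h>

static unsigned int my_pd_opp_to_pstate(struct generic_pm_domain *genpd,
					struct dev_pm_opp *opp)
{
	/* Assumes the DT encodes the performance state as the opp-level. */
	return dev_pm_opp_get_level(opp);
}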
2986
2987static int __init genpd_bus_init(void)
2988{
2989 return bus_register(&genpd_bus_type);
2990}
2991core_initcall(genpd_bus_init);
2992
d30d819d 2993#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2994
2995
2996/*** debugfs support ***/
2997
8b0510b5 2998#ifdef CONFIG_DEBUG_FS
2999/*
3000 * TODO: This function is a slightly modified version of rtpm_status_show
d30d819d 3001 * from sysfs.c, so generalize it.
2bd5306a 3002 */
2bd5306a
MM
3003static void rtpm_status_str(struct seq_file *s, struct device *dev)
3004{
3005 static const char * const status_lookup[] = {
3006 [RPM_ACTIVE] = "active",
3007 [RPM_RESUMING] = "resuming",
3008 [RPM_SUSPENDED] = "suspended",
3009 [RPM_SUSPENDING] = "suspending"
3010 };
3011 const char *p = "";
3012
3013 if (dev->power.runtime_error)
3014 p = "error";
3015 else if (dev->power.disable_depth)
3016 p = "unsupported";
3017 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3018 p = status_lookup[dev->power.runtime_status];
3019 else
3020 WARN_ON(1);
3021
3022 seq_printf(s, "%-25s ", p);
3023}
3024
3025static void perf_status_str(struct seq_file *s, struct device *dev)
3026{
3027 struct generic_pm_domain_data *gpd_data;
3028
3029 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3030 seq_put_decimal_ull(s, "", gpd_data->performance_state);
2bd5306a 3031}
2bd5306a 3032
3033static int genpd_summary_one(struct seq_file *s,
3034 struct generic_pm_domain *genpd)
3035{
3036 static const char * const status_lookup[] = {
3037 [GENPD_STATE_ON] = "on",
3038 [GENPD_STATE_OFF] = "off"
3039 };
3040 struct pm_domain_data *pm_data;
3041 const char *kobj_path;
3042 struct gpd_link *link;
6954d432 3043 char state[16];
3044 int ret;
3045
35241d12 3046 ret = genpd_lock_interruptible(genpd);
3047 if (ret)
3048 return -ERESTARTSYS;
3049
66a5ca4b 3050 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2bd5306a 3051 goto exit;
41e2c8e0 3052 if (!genpd_status_on(genpd))
0ba554e4 3053 snprintf(state, sizeof(state), "%s-%u",
6954d432 3054 status_lookup[genpd->status], genpd->state_idx);
fc5cbf0c 3055 else
3056 snprintf(state, sizeof(state), "%s",
3057 status_lookup[genpd->status]);
45fbc464 3058 seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
3059
3060 /*
3061 * Modifications on the list require holding locks on both
8d87ae48 3062 * parent and child, so we are safe.
66a5ca4b 3063 * Also genpd->name is immutable.
2bd5306a 3064 */
8d87ae48 3065 list_for_each_entry(link, &genpd->parent_links, parent_node) {
3066 if (list_is_first(&link->parent_node, &genpd->parent_links))
3067 seq_printf(s, "\n%48s", " ");
3068 seq_printf(s, "%s", link->child->name);
3069 if (!list_is_last(&link->parent_node, &genpd->parent_links))
3070 seq_puts(s, ", ");
3071 }
3072
66a5ca4b 3073 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3074 kobj_path = kobject_get_path(&pm_data->dev->kobj,
3075 genpd_is_irq_safe(genpd) ?
3076 GFP_ATOMIC : GFP_KERNEL);
3077 if (kobj_path == NULL)
3078 continue;
3079
3080 seq_printf(s, "\n %-50s ", kobj_path);
3081 rtpm_status_str(s, pm_data->dev);
45fbc464 3082 perf_status_str(s, pm_data->dev);
3083 kfree(kobj_path);
3084 }
3085
3086 seq_puts(s, "\n");
3087exit:
35241d12 3088 genpd_unlock(genpd);
3089
3090 return 0;
3091}
3092
d32dcc6c 3093static int summary_show(struct seq_file *s, void *data)
2bd5306a 3094{
66a5ca4b 3095 struct generic_pm_domain *genpd;
3096 int ret = 0;
3097
45fbc464 3098 seq_puts(s, "domain status children performance\n");
15dec67a 3099 seq_puts(s, " /device runtime status\n");
45fbc464 3100 seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3101
3102 ret = mutex_lock_interruptible(&gpd_list_lock);
3103 if (ret)
3104 return -ERESTARTSYS;
3105
66a5ca4b 3106 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
9e9704ea 3107 ret = genpd_summary_one(s, genpd);
3108 if (ret)
3109 break;
3110 }
3111 mutex_unlock(&gpd_list_lock);
3112
3113 return ret;
3114}
3115
d32dcc6c 3116static int status_show(struct seq_file *s, void *data)
2bd5306a 3117{
b6a1d093 3118 static const char * const status_lookup[] = {
3119 [GENPD_STATE_ON] = "on",
3120 [GENPD_STATE_OFF] = "off"
3121 };
3122
3123 struct generic_pm_domain *genpd = s->private;
3124 int ret = 0;
3125
3126 ret = genpd_lock_interruptible(genpd);
3127 if (ret)
3128 return -ERESTARTSYS;
3129
3130 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3131 goto exit;
3132
49f618e1 3133 if (genpd->status == GENPD_STATE_OFF)
3134 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3135 genpd->state_idx);
3136 else
3137 seq_printf(s, "%s\n", status_lookup[genpd->status]);
3138exit:
3139 genpd_unlock(genpd);
3140 return ret;
3141}
3142
d32dcc6c 3143static int sub_domains_show(struct seq_file *s, void *data)
3144{
3145 struct generic_pm_domain *genpd = s->private;
3146 struct gpd_link *link;
3147 int ret = 0;
3148
3149 ret = genpd_lock_interruptible(genpd);
3150 if (ret)
3151 return -ERESTARTSYS;
3152
3153 list_for_each_entry(link, &genpd->parent_links, parent_node)
3154 seq_printf(s, "%s\n", link->child->name);
3155
3156 genpd_unlock(genpd);
3157 return ret;
3158}
3159
d32dcc6c 3160static int idle_states_show(struct seq_file *s, void *data)
3161{
3162 struct generic_pm_domain *genpd = s->private;
bd40cbb0 3163 u64 now, delta, idle_time = 0;
3164 unsigned int i;
3165 int ret = 0;
3166
3167 ret = genpd_lock_interruptible(genpd);
3168 if (ret)
3169 return -ERESTARTSYS;
3170
c6a113b5 3171 seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
3172
3173 for (i = 0; i < genpd->state_count; i++) {
bd40cbb0 3174 idle_time += genpd->states[i].idle_time;
b6a1d093 3175
3176 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3177 now = ktime_get_mono_fast_ns();
3178 if (now > genpd->accounting_time) {
3179 delta = now - genpd->accounting_time;
3180 idle_time += delta;
3181 }
3182 }
b6a1d093 3183
3184 do_div(idle_time, NSEC_PER_MSEC);
3185 seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3186 genpd->states[i].usage, genpd->states[i].rejected);
3187 }
3188
3189 genpd_unlock(genpd);
3190 return ret;
3191}
3192
d32dcc6c 3193static int active_time_show(struct seq_file *s, void *data)
3194{
3195 struct generic_pm_domain *genpd = s->private;
bd40cbb0 3196 u64 now, on_time, delta = 0;
3197 int ret = 0;
3198
3199 ret = genpd_lock_interruptible(genpd);
3200 if (ret)
3201 return -ERESTARTSYS;
3202
3203 if (genpd->status == GENPD_STATE_ON) {
3204 now = ktime_get_mono_fast_ns();
3205 if (now > genpd->accounting_time)
3206 delta = now - genpd->accounting_time;
3207 }
b6a1d093 3208
3209 on_time = genpd->on_time + delta;
3210 do_div(on_time, NSEC_PER_MSEC);
3211 seq_printf(s, "%llu ms\n", on_time);
3212
3213 genpd_unlock(genpd);
3214 return ret;
3215}
3216
d32dcc6c 3217static int total_idle_time_show(struct seq_file *s, void *data)
3218{
3219 struct generic_pm_domain *genpd = s->private;
bd40cbb0 3220 u64 now, delta, total = 0;
3221 unsigned int i;
3222 int ret = 0;
3223
3224 ret = genpd_lock_interruptible(genpd);
3225 if (ret)
3226 return -ERESTARTSYS;
3227
3228 for (i = 0; i < genpd->state_count; i++) {
bd40cbb0 3229 total += genpd->states[i].idle_time;
b6a1d093 3230
3231 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3232 now = ktime_get_mono_fast_ns();
3233 if (now > genpd->accounting_time) {
3234 delta = now - genpd->accounting_time;
3235 total += delta;
3236 }
3237 }
b6a1d093 3238 }
b6a1d093 3239
3240 do_div(total, NSEC_PER_MSEC);
3241 seq_printf(s, "%llu ms\n", total);
3242
3243 genpd_unlock(genpd);
3244 return ret;
3245}
3246
3247
d32dcc6c 3248static int devices_show(struct seq_file *s, void *data)
3249{
3250 struct generic_pm_domain *genpd = s->private;
3251 struct pm_domain_data *pm_data;
3252 const char *kobj_path;
3253 int ret = 0;
3254
3255 ret = genpd_lock_interruptible(genpd);
3256 if (ret)
3257 return -ERESTARTSYS;
3258
3259 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3260 kobj_path = kobject_get_path(&pm_data->dev->kobj,
3261 genpd_is_irq_safe(genpd) ?
3262 GFP_ATOMIC : GFP_KERNEL);
3263 if (kobj_path == NULL)
3264 continue;
3265
3266 seq_printf(s, "%s\n", kobj_path);
3267 kfree(kobj_path);
3268 }
3269
3270 genpd_unlock(genpd);
3271 return ret;
3272}
3273
d32dcc6c 3274static int perf_state_show(struct seq_file *s, void *data)
3275{
3276 struct generic_pm_domain *genpd = s->private;
3277
3278 if (genpd_lock_interruptible(genpd))
3279 return -ERESTARTSYS;
3280
3281 seq_printf(s, "%u\n", genpd->performance_state);
3282
3283 genpd_unlock(genpd);
3284 return 0;
3285}
3286
3287DEFINE_SHOW_ATTRIBUTE(summary);
3288DEFINE_SHOW_ATTRIBUTE(status);
3289DEFINE_SHOW_ATTRIBUTE(sub_domains);
3290DEFINE_SHOW_ATTRIBUTE(idle_states);
3291DEFINE_SHOW_ATTRIBUTE(active_time);
3292DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3293DEFINE_SHOW_ATTRIBUTE(devices);
3294DEFINE_SHOW_ATTRIBUTE(perf_state);
2bd5306a 3295
718072ce 3296static void genpd_debug_add(struct generic_pm_domain *genpd)
3297{
3298 struct dentry *d;
3299
3300 if (!genpd_debugfs_dir)
3301 return;
3302
3303 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3304
3305 debugfs_create_file("current_state", 0444,
3306 d, genpd, &status_fops);
3307 debugfs_create_file("sub_domains", 0444,
3308 d, genpd, &sub_domains_fops);
3309 debugfs_create_file("idle_states", 0444,
3310 d, genpd, &idle_states_fops);
3311 debugfs_create_file("active_time", 0444,
3312 d, genpd, &active_time_fops);
3313 debugfs_create_file("total_idle_time", 0444,
3314 d, genpd, &total_idle_time_fops);
3315 debugfs_create_file("devices", 0444,
3316 d, genpd, &devices_fops);
3317 if (genpd->set_performance_state)
3318 debugfs_create_file("perf_state", 0444,
3319 d, genpd, &perf_state_fops);
3320}
3321
3322static int __init genpd_debug_init(void)
3323{
b6a1d093 3324 struct generic_pm_domain *genpd;
2bd5306a 3325
9e9704ea 3326 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2bd5306a 3327
3328 debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3329 NULL, &summary_fops);
2bd5306a 3330
3331 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3332 genpd_debug_add(genpd);
b6a1d093 3333
3334 return 0;
3335}
9e9704ea 3336late_initcall(genpd_debug_init);
2bd5306a 3337
9e9704ea 3338static void __exit genpd_debug_exit(void)
2bd5306a 3339{
9e9704ea 3340 debugfs_remove_recursive(genpd_debugfs_dir);
2bd5306a 3341}
9e9704ea 3342__exitcall(genpd_debug_exit);
8b0510b5 3343#endif /* CONFIG_DEBUG_FS */