PM: domains: enable domain idle state accounting
[linux-2.6-block.git] / drivers/base/power/domain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250	/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
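
/*
 * For illustration only (derived from the macro above): a call such as
 *
 *	GENPD_DEV_CALLBACK(genpd, int, stop, dev)
 *
 * expands to a statement expression that invokes genpd->dev_ops.stop(dev)
 * when that callback is set and evaluates to (int)0 otherwise; this is how
 * genpd_stop_dev() and genpd_start_dev() below are implemented.
 */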

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
				  int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
				   int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
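
/*
 * Illustrative sketch (not code from this file): the lock_ops indirection
 * lets an IRQ-safe domain use the spinlock-based helpers while an ordinary
 * domain uses the mutex-based ones, roughly:
 *
 *	if (genpd_is_irq_safe(genpd))
 *		genpd->lock_ops = &genpd_spin_ops;
 *	else
 *		genpd->lock_ops = &genpd_mtx_ops;
 *
 * after which all callers go through genpd_lock()/genpd_unlock() uniformly.
 */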

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain,
	 * to indicate a suboptimal configuration for PM. For an always on
	 * domain this isn't the case, thus don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, the domain has just come out of its
	 * idle state, so add the elapsed time to that state's idle time.
	 * Otherwise the domain has just been powered off, so add the
	 * elapsed time to its on time.
	 */
	if (genpd->status == GENPD_STATE_ON) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
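
/*
 * Worked example for the accounting above (illustrative): accounting_time
 * always records the instant of the last ON<->OFF transition. If a domain
 * powers off into idle state 1 at t=100ms and back on at t=130ms, the
 * power-on path sets GENPD_STATE_ON and calls genpd_update_accounting(),
 * which adds the 30ms delta to states[1].idle_time; the next power-off
 * then adds the time spent powered on to on_time.
 */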

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		/* Find parent's performance state */
		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
							 parent->opp_table,
							 state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
							       parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		goto err;

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
							       parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	   the device doesn't have any performance state constraints left
 *	   (and so the device no longer participates in determining the
 *	   target performance state of the genpd).
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
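
/*
 * Illustrative consumer-side usage (hypothetical values, not code from
 * this file): a driver attached to a performance-state-aware genpd votes
 * with, e.g.:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 3);
 *
 * and later drops its constraint with:
 *
 *	dev_pm_genpd_set_performance_state(dev, 0);
 *
 * The aggregated state is re-evaluated against all other devices and
 * subdomains by _genpd_reeval_performance_state() on every call.
 */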

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret, nr_calls = 0;

	/* Notify consumers that we are about to power on. */
	ret = __raw_notifier_call_chain(&genpd->power_notifiers,
					GENPD_NOTIFY_PRE_ON, NULL, -1,
					&nr_calls);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err;

	if (!genpd->power_on)
		goto out;

	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret, nr_calls = 0;

	/* Notify consumers that we are about to power off. */
	ret = __raw_notifier_call_chain(&genpd->power_notifiers,
					GENPD_NOTIFY_PRE_OFF, NULL, -1,
					&nr_calls);
	ret = notifier_to_errno(ret);
	if (ret)
		goto busy;

	if (!genpd->power_off)
		goto out;

	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	if (nr_calls)
		__raw_notifier_call_chain(&genpd->power_notifiers,
					  GENPD_NOTIFY_ON, NULL, nr_calls - 1,
					  NULL);
	return ret;
}
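
/*
 * Illustrative notifier sequence (derived from the two helpers above): a
 * successful off/on cycle delivers
 *
 *	GENPD_NOTIFY_PRE_OFF -> GENPD_NOTIFY_OFF ->
 *	GENPD_NOTIFY_PRE_ON  -> GENPD_NOTIFY_ON
 *
 * while a ->power_off() failure after PRE_OFF rewinds the chain by sending
 * GENPD_NOTIFY_ON to the nr_calls - 1 callbacks that already saw PRE_OFF.
 */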

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}
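
/*
 * Worked example (illustrative): when genpd_runtime_suspend() below calls
 * genpd_power_off(genpd, true, 0), the suspending device is not yet
 * RPM_SUSPENDED from runtime PM's point of view, so one_dev_on permits
 * exactly one not-yet-suspended device. A second active device, a PM QoS
 * NO_POWER_OFF flag, or a governor veto via ->power_down_ok() keeps the
 * domain on.
 */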

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}
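
/*
 * Note on the walk order above (illustrative): the first PM ops found in
 * type -> class -> bus order wins, and the driver's own PM ops are used
 * only when no subsystem-level callback exists. A device whose bus
 * provides runtime PM callbacks therefore never falls through to
 * dev->driver->pm here.
 */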

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
	    (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}
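
/*
 * Illustrative runtime PM flow (not code from this file): a driver call
 * like pm_runtime_get_sync(dev) on a suspended device lands in
 * genpd_runtime_resume() above, which powers on the domain and its
 * parents via genpd_power_on() before running genpd_start_dev() and the
 * driver's own ->runtime_resume(); genpd_runtime_suspend() mirrors the
 * sequence in reverse.
 */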

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);
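
/*
 * For illustration: booting with "pd_ignore_unused" on the kernel command
 * line skips the late_initcall above and keeps all bootloader-enabled
 * domains powered, which can help when debugging a platform that hangs
 * once an unused domain is turned off.
 */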

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system). In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GENPD_STATE_OFF;
	}

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
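
/*
 * Illustrative syscore usage (hypothetical driver code, not part of this
 * file): a timer kept running into the syscore phase brackets its hooks
 * with these helpers, e.g.:
 *
 *	static int foo_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(foo_timer_dev);
 *		return 0;
 *	}
 *
 *	static void foo_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(foo_timer_dev);
 *	}
 */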

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
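
/*
 * Illustrative notifier consumer (hypothetical, not code from this file):
 * a driver that must save context before its domain goes down could do:
 *
 *	static int foo_genpd_notify(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF)
 *			foo_save_context();
 *		else if (action == GENPD_NOTIFY_ON)
 *			foo_restore_context();
 *		return NOTIFY_OK;
 *	}
 *
 * registered once via dev_pm_genpd_add_notifier(dev, &foo->nb).
 */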

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
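
/*
 * Example (illustrative sketch): platform setup code that owns two already
 * initialized domains, "parent_pd" and "child_pd" (hypothetical names),
 * could wire them up as follows:
 *
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 *	if (ret)
 *		pr_err("failed to add subdomain: %d\n", ret);
 *
 * After this, parent_pd is only powered off once child_pd (and all of the
 * parent's other children and devices) have been powered off.
 */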

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Leader PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain (true if initially powered off).
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
			!genpd_status_on(genpd))
		return -EINVAL;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret) {
			if (genpd_is_cpu_domain(genpd))
				free_cpumask_var(genpd->cpus);
			return ret;
		}
	} else if (!gov && genpd->state_count > 1) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
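
/*
 * Example (illustrative sketch): a minimal provider might embed a
 * generic_pm_domain in its own driver data and initialize it as below.
 * The names and callbacks are hypothetical.
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my-pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);	// register, initially off
 */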

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * needs to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;
	return false;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			if (ret != -EPROBE_DEFER)
				dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
					ret);
			goto unlock;
		}

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(IS_ERR(genpd->opp_table));
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
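
/*
 * Example (illustrative sketch): after pm_genpd_init(), a platform driver
 * typically exposes its single domain to DT consumers from probe():
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 *
 * with a provider node along the lines of (hypothetical binding):
 *
 *	power: power-controller@12340000 {
 *		compatible = "vendor,power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <0>;
 *	};
 */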

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
						i, ret);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
			WARN_ON(IS_ERR(genpd->opp_table));
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
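
/*
 * Example (illustrative sketch): a controller providing several domains
 * fills in a genpd_onecell_data and registers it once; consumers then
 * select a domain by index. Names are hypothetical.
 *
 *	static struct generic_pm_domain *my_domains[] = { &pd_a, &pd_b };
 *	static struct genpd_onecell_data my_onecell = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &my_onecell);
 *
 * The matching provider node would use #power-domain-cells = <1>, and a
 * consumer would reference e.g. "power-domains = <&power 1>;" to get pd_b.
 */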

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and,
 * if found, uses the xlate function of the provider to map phandle args to a
 * PM domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for looking up the PM domain
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, dev);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and removes the subdomain from the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
			      struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = pm_genpd_remove_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to the device node associated with the provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node that is passed. The PM domain will only
 * be removed if the provider associated with the domain has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static struct bus_type genpd_bus_type = {
	.name = "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d",
				pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret)
		genpd_remove_device(pd, dev);

	return ret ? -EPROBE_DEFER : 1;
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse the device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to the retrieved pm_domain ops.
 *
 * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
 * need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device, but
 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
 * that the device is not probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
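
/*
 * Example (illustrative sketch): genpd_dev_pm_attach() is normally invoked
 * via dev_pm_domain_attach() from bus code at probe time, rather than
 * called directly by drivers. A consumer node with exactly one power
 * domain looks roughly like this (hypothetical binding):
 *
 *	uart0: serial@fe001000 {
 *		compatible = "vendor,uart";
 *		reg = <0xfe001000 0x100>;
 *		power-domains = <&power 0>;
 *	};
 */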

/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier at the provided
 * @index. If such is found, creates a virtual device and attaches it to the
 * retrieved pm_domain ops. To deal with detaching of the virtual device, the
 * ->detach() callback in the struct dev_pm_domain is assigned to
 * genpd_dev_pm_detach().
 *
 * Returns the created virtual device on a successfully attached PM domain,
 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
 * failures. If a power-domain exists for the device, but cannot be found or
 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the
 * device is not probed and to re-try again later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *virt_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/* Verify that the index is within a valid range. */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
	if (!virt_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
	virt_dev->bus = &genpd_bus_type;
	virt_dev->release = genpd_release_dev;
	virt_dev->of_node = of_node_get(dev->of_node);

	ret = device_register(virt_dev);
	if (ret) {
		put_device(virt_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
	if (ret < 1) {
		device_unregister(virt_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	pm_runtime_enable(virt_dev);
	genpd_queue_power_off_work(dev_to_genpd(virt_dev));

	return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
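
/*
 * Example (illustrative sketch): a driver whose device lists two power
 * domains can attach to them individually, usually via the
 * dev_pm_domain_attach_by_id() wrapper, and tie each virtual device's
 * runtime PM to its own with a device link (names are hypothetical):
 *
 *	struct device *pd0 = genpd_dev_pm_attach_by_id(dev, 0);
 *
 *	if (!IS_ERR_OR_NULL(pd0))
 *		link = device_link_add(dev, pd0, DL_FLAG_PM_RUNTIME |
 *				       DL_FLAG_STATELESS);
 */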

/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}

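/*
 * Example (illustrative sketch): with a consumer node such as
 *
 *	power-domains = <&power 0>, <&power 1>;
 *	power-domain-names = "bus", "core";
 *
 * a driver could attach to the "core" domain by name (hypothetical usage):
 *
 *	struct device *core_pd = genpd_dev_pm_attach_by_name(dev, "core");
 */
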
static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
			     struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
				   &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
				   &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000 * residency;

	genpd_state->power_on_latency_ns = 1000 * exit_latency;
	genpd_state->power_off_latency_ns = 1000 * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret == -ENOENT ? 0 : ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;
		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the caller's responsibility to free
 * it after use. If zero or more compatible domain idle states are found, 0 is
 * returned; in case of errors, a negative error code is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
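
/*
 * Example (illustrative sketch): a provider node can reference idle states
 * that genpd_parse_state() understands, e.g. (hypothetical values):
 *
 *	PD_RET: domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <20>;
 *		exit-latency-us = <40>;
 *		min-residency-us = <80>;
 *	};
 *
 *	power: power-controller@12340000 {
 *		...
 *		domain-idle-states = <&PD_RET>;
 *	};
 */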

/**
 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
 *
 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
 *	 state.
 *
 * Returns performance state encoded in the OPP of the genpd. This calls
 * platform specific genpd->opp_to_performance_state() callback to translate
 * power domain OPP to performance state.
 *
 * Returns performance state on success and 0 on failure.
 */
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
					       struct dev_pm_opp *opp)
{
	struct generic_pm_domain *genpd = NULL;
	int state;

	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);

	if (unlikely(!genpd->opp_to_performance_state))
		return 0;

	genpd_lock(genpd);
	state = genpd->opp_to_performance_state(genpd, opp);
	genpd_unlock(genpd);

	return state;
}
EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);

static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/*** debugfs support ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          children\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GENPD_STATE_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			   genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->parent_links, parent_node)
		seq_printf(s, "%s\n", link->child->name);

	genpd_unlock(genpd);
	return ret;
}

static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");

	for (i = 0; i < genpd->state_count; i++) {
		ktime_t delta = 0;
		s64 msecs;

		if ((genpd->status == GENPD_STATE_OFF) &&
		    (genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		msecs = ktime_to_ms(
			ktime_add(genpd->states[i].idle_time, delta));
		seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
			   genpd->states[i].usage, genpd->states[i].rejected);
	}

	genpd_unlock(genpd);
	return ret;
}
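
/*
 * With the accounting above, reading a domain's "idle_states" debugfs file
 * produces output of the following shape (the values are illustrative):
 *
 *	State          Time Spent(ms) Usage          Rejected
 *	S0             1420           37             2
 */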

static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GENPD_STATE_ON)
		delta = ktime_sub(ktime_get(), genpd->accounting_time);

	seq_printf(s, "%lld ms\n", ktime_to_ms(
				ktime_add(genpd->on_time, delta)));

	genpd_unlock(genpd);
	return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {

		if ((genpd->status == GENPD_STATE_OFF) &&
		    (genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		total = ktime_add(total, genpd->states[i].idle_time);
	}
	total = ktime_add(total, delta);

	seq_printf(s, "%lld ms\n", ktime_to_ms(total));

	genpd_unlock(genpd);
	return ret;
}


static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

static int __init genpd_debug_init(void)
{
	struct dentry *d;
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

		debugfs_create_file("current_state", 0444,
				    d, genpd, &status_fops);
		debugfs_create_file("sub_domains", 0444,
				    d, genpd, &sub_domains_fops);
		debugfs_create_file("idle_states", 0444,
				    d, genpd, &idle_states_fops);
		debugfs_create_file("active_time", 0444,
				    d, genpd, &active_time_fops);
		debugfs_create_file("total_idle_time", 0444,
				    d, genpd, &total_idle_time_fops);
		debugfs_create_file("devices", 0444,
				    d, genpd, &devices_fops);
		if (genpd->set_performance_state)
			debugfs_create_file("perf_state", 0444,
					    d, genpd, &perf_state_fops);
	}

	return 0;
}
late_initcall(genpd_debug_init);

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */