/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine)						\
		__ret = __routine(dev);				\
	__ret;							\
})
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};
static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}
static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};
static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}
static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};
#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain,
	 * as this indicates a suboptimal configuration for PM. For an
	 * always-on domain this isn't the case, so don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}
/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}
static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}
#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, it means we are just out of off and
	 * so update the idle time, and vice versa.
	 */
	if (genpd->status == GPD_STATE_ACTIVE) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * We aren't propagating performance state changes of a subdomain to
	 * its masters as we don't have hardware that needs it. Beyond that,
	 * the performance states of a subdomain and its masters may not have
	 * a one-to-one mapping and would require additional information. We
	 * can get back to this once we have hardware that needs it. For that
	 * reason, we don't have to consider the performance state of the
	 * subdomains here.
	 */
	return state;
}
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state)
{
	int ret;

	if (state == genpd->performance_state)
		return 0;

	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		return ret;

	genpd->performance_state = state;
	return 0;
}
/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	   the device doesn't have any performance state constraints left
 *	   (and so the device wouldn't participate anymore in finding the
 *	   target performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (unlikely(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data)) {
		WARN_ON(1);
		return -EINVAL;
	}

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
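/*
 * Usage sketch (hypothetical consumer code, not part of this file): a driver
 * whose device is attached to a genpd that populates ->set_performance_state()
 * can request and later drop a performance state constraint. The state values
 * below are illustrative assumptions only.
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 2);
 *	if (ret)
 *		dev_err(dev, "failed to set perf state: %d\n", ret);
 *	...
 *	dev_pm_genpd_set_performance_state(dev, 0);	// drop the constraint
 */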
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}
static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}
/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}
/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback,
 * the RPM status of the related device is in an intermediate state, not yet
 * turned into RPM_SUSPENDED. This means genpd_power_off() must allow one
 * device to not be RPM_SUSPENDED, while it tries to power off the PM domain.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the master yet after
		 * incrementing it.  In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd_update_accounting(genpd);

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return 0;
}
/**
 * genpd_power_on - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		genpd_lock_nested(master, depth + 1);
		ret = genpd_power_on(master, depth + 1);
		genpd_unlock(master);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return ret;
}
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}
/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}
/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}
/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}
/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

 err_stop:
	genpd_stop_dev(genpd, dev);
 err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP
/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_off(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}
}
/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_on(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}
/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}
/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}
/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}
/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}
/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}
/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}
/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}
/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}
/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}
/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */
static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}
static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	genpd_lock(genpd);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_unlock(genpd);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, NULL);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = genpd_lookup_dev(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}
/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free = state;

	return 0;
}
static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
		return -EINVAL;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			return ret;
	} else if (!gov) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
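/*
 * Usage sketch (hypothetical platform code, not part of this file): a typical
 * provider embeds struct generic_pm_domain, fills in the power on/off
 * callbacks and calls pm_genpd_init() before adding devices. The names
 * foo_pd and foo_pd_power_on/off are illustrative assumptions.
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);	// start powered off
 *	if (!ret)
 *		ret = pm_genpd_add_device(&foo_pd, dev);
 */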
static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	kfree(genpd->free);
	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}
/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */
/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);

/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);
/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}
/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}
/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
				ret);
			goto unlock;
		}

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(!genpd->opp_table);
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
					i, ret);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
			WARN_ON(!genpd->opp_table);
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
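/*
 * Usage sketch (hypothetical provider driver, not part of this file): a
 * multi-domain controller registers its domains through genpd_onecell_data,
 * so that consumers with "#power-domain-cells = <1>" can reference them by
 * index. All names below are illustrative assumptions.
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_DOMAINS];
 *	static struct genpd_onecell_data foo_genpd_data = {
 *		.domains = foo_domains,
 *		.num_domains = FOO_NR_DOMAINS,
 *	};
 *
 *	// after pm_genpd_init() has been called on each domain:
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_genpd_data);
 */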
/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);
/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and
 * if found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}
/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks-up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, NULL);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);
/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to device node associated with the provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the 'provider' device structure that is passed. The PM
 * domain will only be removed, if the provider associated with domain
 * has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);
static void genpd_release_dev(struct device *dev)
{
	kfree(dev);
}

static struct bus_type genpd_bus_type = {
	.name		= "genpd",
};
/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}
static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}
static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(np, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, NULL);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d",
				pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret)
		genpd_remove_device(pd, dev);

	return ret ? -EPROBE_DEFER : 1;
}
/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Returns 1 on successfully attached PM domain, 0 when the device doesn't
 * need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device, but
 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
 * that the device is not probed and to retry again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
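/*
 * Device tree sketch (illustrative, not part of this file): the single-domain
 * case handled by genpd_dev_pm_attach() corresponds to a consumer node like
 * the following, where "foo_pd" is an assumed provider label.
 *
 *	device@40000000 {
 *		compatible = "vendor,foo-device";
 *		reg = <0x40000000 0x1000>;
 *		power-domains = <&foo_pd>;
 *	};
 *
 * The driver core invokes genpd_dev_pm_attach() via dev_pm_domain_attach()
 * at probe time, so most drivers never call it directly.
 */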
/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier at the provided
 * @index. If one is found, create a virtual device and attach it to the
 * retrieved pm_domain ops. To deal with detaching of the virtual device, the
 * ->detach() callback in the struct dev_pm_domain is assigned to
 * genpd_dev_pm_detach().
 *
 * Returns the created virtual device if successfully attached to its PM
 * domain, NULL when the device doesn't need a PM domain, else an ERR_PTR()
 * in case of failures. If a power-domain exists for the device, but it
 * cannot be found or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to
 * ensure that the device is not probed and to re-try again later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *virt_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/* Deal only with devices using multiple PM domains. */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (num_domains < 2 || index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
	if (!virt_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
	virt_dev->bus = &genpd_bus_type;
	virt_dev->release = genpd_release_dev;

	ret = device_register(virt_dev);
	if (ret) {
		kfree(virt_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
	if (ret < 1) {
		device_unregister(virt_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	pm_runtime_enable(virt_dev);
	genpd_queue_power_off_work(dev_to_genpd(virt_dev));

	return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);

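/*
 * Illustrative sketch (hypothetical consumer): a driver whose device sits in
 * two PM domains could attach each one individually and then control it via
 * runtime PM on the returned virtual device:
 *
 *	struct device *pd_dev = genpd_dev_pm_attach_by_id(dev, 1);
 *
 *	if (IS_ERR(pd_dev))
 *		return PTR_ERR(pd_dev);
 *	else if (pd_dev)
 *		pm_runtime_get_sync(pd_dev);
 */
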
/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}

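/*
 * Illustrative DT snippet (assumed names and bindings, not taken from this
 * file): given
 *
 *	codec@0 {
 *		power-domains = <&pd_core>, <&pd_mem>;
 *		power-domain-names = "core", "mem";
 *	};
 *
 * a driver could attach the "mem" domain with:
 *
 *	pd_dev = genpd_dev_pm_attach_by_name(dev, "mem");
 */
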
static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
			     struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
				   &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
				   &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000 * residency;

	genpd_state->power_on_latency_ns = 1000 * exit_latency;
	genpd_state->power_off_latency_ns = 1000 * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}

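/*
 * Illustrative DT node (assumed values) matching what genpd_parse_state()
 * consumes; the latencies are given in microseconds and converted to
 * nanoseconds above:
 *
 *	DOMAIN_RET: state@0 {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <1000>;
 *		exit-latency-us = <2000>;
 *		min-residency-us = <10000>;
 *	};
 */
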
static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;
		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the
 * states is allocated by this function and it is the responsibility of the
 * caller to free it after use. Returns 0 on success (also when zero
 * compatible domain idle states are found, in which case *@states is set to
 * NULL and *@n to zero); in case of errors, a negative error code is
 * returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);

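/*
 * Illustrative sketch (hypothetical provider code): a power-domain provider
 * can fill in its genpd's idle states from DT before calling pm_genpd_init():
 *
 *	ret = of_genpd_parse_idle_states(np, &genpd->states,
 *					 &genpd->state_count);
 *	if (ret)
 *		return ret;
 */
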
/**
 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
 *
 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
 * @opp: struct dev_pm_opp of the OPP for which we need to find the
 *	performance state.
 *
 * Returns the performance state encoded in the OPP of the genpd. This calls
 * the platform specific genpd->opp_to_performance_state() callback to
 * translate a power domain OPP to a performance state.
 *
 * Returns the performance state on success and 0 on failure.
 */
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
					       struct dev_pm_opp *opp)
{
	struct generic_pm_domain *genpd = NULL;
	int state;

	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);

	if (unlikely(!genpd->opp_to_performance_state))
		return 0;

	genpd_lock(genpd);
	state = genpd->opp_to_performance_state(genpd, opp);
	genpd_unlock(genpd);

	return state;
}
EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);

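/*
 * Illustrative sketch (hypothetical caller): translate one of the domain's
 * OPPs into a performance state and then request it for a consumer device:
 *
 *	pstate = pm_genpd_opp_to_performance_state(&genpd->dev, opp);
 *	if (pstate)
 *		ret = dev_pm_genpd_set_performance_state(dev, pstate);
 */
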
static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */

/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

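/*
 * The resulting debugfs file renders one line per domain plus one indented
 * line per member device, e.g. (hypothetical domain and device names):
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	pd_core                         on              pd_gpu
 *	    /devices/platform/soc/12340000.serial             suspended
 */
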
static int genpd_status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GPD_STATE_POWER_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			   genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int genpd_sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->master_links, master_node)
		seq_printf(s, "%s\n", link->slave->name);

	genpd_unlock(genpd);
	return ret;
}

static int genpd_idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms)\n");

	for (i = 0; i < genpd->state_count; i++) {
		ktime_t delta = 0;
		s64 msecs;

		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		msecs = ktime_to_ms(
			ktime_add(genpd->states[i].idle_time, delta));
		seq_printf(s, "S%-13i %lld\n", i, msecs);
	}

	genpd_unlock(genpd);
	return ret;
}

static int genpd_active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GPD_STATE_ACTIVE)
		delta = ktime_sub(ktime_get(), genpd->accounting_time);

	seq_printf(s, "%lld ms\n", ktime_to_ms(
				ktime_add(genpd->on_time, delta)));

	genpd_unlock(genpd);
	return ret;
}

static int genpd_total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {
		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		total = ktime_add(total, genpd->states[i].idle_time);
	}
	total = ktime_add(total, delta);

	seq_printf(s, "%lld ms\n", ktime_to_ms(total));

	genpd_unlock(genpd);
	return ret;
}

static int genpd_devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int genpd_perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

#define define_genpd_open_function(name) \
static int genpd_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, genpd_##name##_show, inode->i_private); \
}

define_genpd_open_function(summary);
define_genpd_open_function(status);
define_genpd_open_function(sub_domains);
define_genpd_open_function(idle_states);
define_genpd_open_function(active_time);
define_genpd_open_function(total_idle_time);
define_genpd_open_function(devices);
define_genpd_open_function(perf_state);

#define define_genpd_debugfs_fops(name) \
static const struct file_operations genpd_##name##_fops = { \
	.open = genpd_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

define_genpd_debugfs_fops(summary);
define_genpd_debugfs_fops(status);
define_genpd_debugfs_fops(sub_domains);
define_genpd_debugfs_fops(idle_states);
define_genpd_debugfs_fops(active_time);
define_genpd_debugfs_fops(total_idle_time);
define_genpd_debugfs_fops(devices);
define_genpd_debugfs_fops(perf_state);

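/*
 * For reference, define_genpd_open_function(summary) together with
 * define_genpd_debugfs_fops(summary) expands to:
 *
 *	static int genpd_summary_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, genpd_summary_show,
 *				   inode->i_private);
 *	}
 *
 *	static const struct file_operations genpd_summary_fops = {
 *		.open = genpd_summary_open,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 */
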
static int __init genpd_debug_init(void)
{
	struct dentry *d;
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			genpd_debugfs_dir, NULL, &genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
		if (!d)
			return -ENOMEM;

		debugfs_create_file("current_state", 0444,
				d, genpd, &genpd_status_fops);
		debugfs_create_file("sub_domains", 0444,
				d, genpd, &genpd_sub_domains_fops);
		debugfs_create_file("idle_states", 0444,
				d, genpd, &genpd_idle_states_fops);
		debugfs_create_file("active_time", 0444,
				d, genpd, &genpd_active_time_fops);
		debugfs_create_file("total_idle_time", 0444,
				d, genpd, &genpd_total_idle_time_fops);
		debugfs_create_file("devices", 0444,
				d, genpd, &genpd_devices_fops);
		if (genpd->set_performance_state)
			debugfs_create_file("perf_state", 0444,
					    d, genpd, &genpd_perf_state_fops);
	}

	return 0;
}
late_initcall(genpd_debug_init);

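/*
 * With debugfs mounted at /sys/kernel/debug, the files created above can be
 * inspected from userspace, e.g. (hypothetical domain name "pd_core"):
 *
 *	# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	# cat /sys/kernel/debug/pm_genpd/pd_core/current_state
 */
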
static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */