// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)ktime_us_delta(rettime, calltime));
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

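/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * callbacks decoded above come from a driver's dev_pm_ops object.  With
 * SYSTEM_SLEEP_PM_OPS(), one suspend/resume pair also serves the hibernation
 * freeze/thaw/poweroff/restore events:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 */
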
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

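/*
 * Illustrative sketch (hypothetical driver): the *_noirq members decoded
 * above are most easily populated with NOIRQ_SYSTEM_SLEEP_PM_OPS():
 *
 *	static const struct dev_pm_ops bar_pm_ops = {
 *		NOIRQ_SYSTEM_SLEEP_PM_OPS(bar_suspend_noirq, bar_resume_noirq)
 *	};
 */
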
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

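/*
 * Note: pm_print_times_enabled, checked by the initcall_debug helpers above,
 * can be toggled at run time via /sys/power/pm_print_times and is enabled by
 * default when the kernel is booted with the initcall_debug parameter.
 */
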
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL, KERN_EMERG);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_skip_suspend(dev);

	return !dev->power.must_resume;
}

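/*
 * Illustrative sketch (hypothetical driver): leaving a device suspended over
 * resume hinges on the DPM_FLAG_MAY_SKIP_RESUME driver flag, typically set at
 * probe time:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 */
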
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		dev->power.async_in_progress = true;

		get_device(dev);

		if (async_schedule_dev_nocall(func, dev))
			return true;

		put_device(dev);
	}
	/*
	 * Because async_schedule_dev_nocall() above has returned false or it
	 * has not been called at all, func() is not running and it is safe to
	 * update the async_in_progress flag without extra synchronization.
	 */
	dev->power.async_in_progress = false;
	return false;
}

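/*
 * Illustrative sketch: a device opts in to asynchronous suspend/resume (see
 * is_async() above) via device_enable_async_suspend(), usually at probe time;
 * user space can veto all async handling through /sys/power/pm_async.
 *
 *	device_enable_async_suspend(dev);
 */
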
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback for
	 * this device later, it needs to appear as "suspended" to PM-runtime,
	 * so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
	 * to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_skip_suspend(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);

	if (error) {
		async_error = error;
		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
	}
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_noirq(dev, pm_transition, true);
	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_noirq(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);

	if (error) {
		async_error = error;
		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async early" : " early", error);
	}
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_early(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_early(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (error) {
		async_error = error;
		dpm_save_failed_step(SUSPEND_RESUME);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		list_move_tail(&dev->power.entry, &dpm_prepared_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

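/*
 * For example, resume_event(PMSG_SUSPEND) is PMSG_RESUME, while a failure
 * during the freeze phase (PMSG_FREEZE or PMSG_QUIESCE) unwinds with
 * PMSG_RECOVER.
 */
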
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal.  Also resume them if doing that is not allowed
	 * to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_noirq_list);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error)
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);

	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (device_wakeup_path(dev) && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	wake_up_all_idle_cpus();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);

		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

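/*
 * Illustrative sketch (assumed caller, cf. kernel/power/suspend.c): the
 * system sleep core pairs the exported entry points in this file roughly as
 * follows:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		error = dpm_suspend_end(PMSG_SUSPEND);
 *		// ...enter the sleep state...
 *		dpm_resume_start(PMSG_RESUME);
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 */
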
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

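/*
 * Note: the legacy bus ->suspend()/->resume() callbacks invoked through
 * legacy_suspend() predate struct dev_pm_ops and are deprecated; new code
 * should provide a dev_pm_ops object instead.
 */
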
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = true;
	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_suspended_list);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error)
		dpm_save_failed_step(SUSPEND_SUSPEND);

	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(dev, callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

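/*
 * Illustrative sketch (hypothetical driver): returning a positive value from
 * ->prepare() opts the device in to the direct-complete optimization computed
 * above, provided DPM_FLAG_NO_DIRECT_COMPLETE is not set; foo_state_is_fine()
 * stands in for a driver-specific check:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) && foo_state_is_fine(dev);
 *	}
 */
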
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disable probing of devices. This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices will happen during suspend or
	 * hibernation and system behavior will be unpredictable in this case.
	 * So, let's prohibit device's probing here and defer their probes
	 * instead. The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list) && !error) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);

		if (!error) {
			dev->power.is_prepared = true;
			if (!list_empty(&dev->power.entry))
				list_move_tail(&dev->power.entry, &dpm_prepared_list);
		} else if (error == -EAGAIN) {
			error = 0;
		} else {
			dev_info(dev, "not prepared for power transition: code %d\n",
				 error);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error)
		dpm_save_failed_step(SUSPEND_PREPARE);
	else
		error = dpm_suspend(state);

	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
	if (ret)
		dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

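/*
 * Illustrative sketch (hypothetical driver): a suspend callback that must not
 * run until another device has finished its own phase can order itself with
 * device_pm_wait_for_dev(); foo->companion is an assumed driver-private
 * pointer to that other device:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return device_pm_wait_for_dev(dev, foo->companion);
 *	}
 */
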
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
}

bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}

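/*
 * Illustrative sketch (hypothetical driver): DPM_FLAG_SMART_SUSPEND lets the
 * core leave a runtime-suspended device alone during system suspend, which is
 * what the check above implements; it is commonly combined with
 * DPM_FLAG_MAY_SKIP_RESUME at probe time:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 */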