PM / suspend: Export pm_suspend_target_state
[linux-block.git] / drivers / base / power / main.c

/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = 0;

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state,
				  const char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}
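
/*
 * Note: pm_print_times_enabled is toggled from user space; on kernels built
 * with CONFIG_PM_SLEEP_DEBUG it is exposed as /sys/power/pm_print_times.
 * A minimal sketch of enabling the per-device timing reports above:
 *
 *	# echo 1 > /sys/power/pm_print_times
 *	# echo mem > /sys/power/state
 */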

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
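
/*
 * The power.async_suspend flag checked above is opt-in. A driver that is
 * safe to suspend/resume in parallel with the rest of the system typically
 * sets it at probe time; a minimal sketch (the "foo" name is hypothetical):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 *
 * The global switch pm_async_enabled corresponds to /sys/power/pm_async.
 */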

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (e.g. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
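
/*
 * The three selectors above pick different members of the same dev_pm_ops.
 * A driver that needs work in all three suspend phases would provide, for
 * example (a sketch; the "foo_*" callbacks are hypothetical):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 *
 * For a suspend/resume transition, pm_op() then returns foo_suspend or
 * foo_resume, pm_late_early_op() the _late/_early pair and pm_noirq_op()
 * the _noirq pair.
 */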

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

#ifdef CONFIG_PM_DEBUG
static void dpm_show_time(ktime_t starttime, pm_message_t state,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
#else
static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
				 const char *info) {}
#endif /* CONFIG_PM_DEBUG */

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
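
/*
 * The watchdog above is compiled in only when CONFIG_DPM_WATCHDOG is set;
 * CONFIG_DPM_WATCHDOG_TIMEOUT (in seconds) bounds how long a single device
 * callback may run before the machine panics. A sketch of the relevant
 * .config fragment:
 *
 *	CONFIG_DPM_WATCHDOG=y
 *	CONFIG_DPM_WATCHDOG_TIMEOUT=120
 */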

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait_for_superior(dev, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
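
/*
 * device_wakeup_arm_wake_irqs() above arms the dedicated wake IRQs that
 * drivers registered beforehand. A minimal sketch of such a registration
 * at probe time (the "pdev"/"irq" names are hypothetical), using the
 * <linux/pm_wakeirq.h> API included at the top of this file:
 *
 *	device_init_wakeup(&pdev->dev, true);
 *	ret = dev_pm_set_wake_irq(&pdev->dev, irq);
 *
 * The IRQ is then armed here during suspend and disarmed again in
 * dpm_resume_noirq() via device_wakeup_disarm_wake_irqs().
 */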

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
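
/*
 * Taken together, the entry points in this file give the system-wide
 * ordering (each device-level phase between prepare and complete may run
 * asynchronously, subject to the dependency waits above):
 *
 *	dpm_prepare() -> dpm_suspend() -> dpm_suspend_late() ->
 *	dpm_suspend_noirq() -> [system sleep] -> dpm_resume_noirq() ->
 *	dpm_resume_early() -> dpm_resume() -> dpm_complete()
 *
 * dpm_suspend_start()/dpm_suspend_end() and dpm_resume_start()/
 * dpm_resume_end() are the pairwise wrappers the PM core calls around
 * these phases.
 */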

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
		dpm_clear_suppliers_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes, before
	 * disabling probing of devices.  This sync point is important at
	 * least at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices happens during suspend or
	 * hibernation; system behavior would be unpredictable in that case.
	 * So, let's prohibit device probing here and defer the probes
	 * instead.  The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
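
/*
 * Note the -EAGAIN special case above: -EAGAIN from device_prepare() is not
 * treated as a failure of the whole transition; the error is cleared and
 * the loop continues instead of breaking out and aborting the suspend.
 */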

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
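
/*
 * device_pm_wait_for_dev() is the exported hook for drivers with ordering
 * requirements that the parent/child and device-link waits above do not
 * capture. A minimal sketch inside a hypothetical "foo" driver's resume
 * callback:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *		return foo_hw_reinit(foo);
 *	}
 */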

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
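
/*
 * A minimal usage sketch for dpm_for_each_dev(), counting the devices the
 * PM core currently tracks (the caller-side names are hypothetical):
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *	dpm_for_each_dev(&count, count_dev);
 */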

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
	spin_unlock_irq(&dev->power.lock);
}