PM: sleep: core: Do not skip callbacks in the resume phase
drivers/base/power/main.c (linux-block.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())

/*
 * The entries in the dpm_list list are in a depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

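/*
 * Illustrative usage note (driver-side sketch, not part of this file):
 * dpm_wait() only blocks on devices that participate in asynchronous
 * suspend/resume.  A driver typically opts a device in at probe time with:
 *
 *	device_enable_async_suspend(dev);
 *
 * after which the PM core may run the device's callbacks in an async
 * thread, and dependent devices synchronize on dev->power.completion
 * through the dpm_wait*() helpers above.
 */
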
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (e.g. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

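/*
 * Ordering summary: during resume a device waits for its "superiors"
 * (parent and suppliers) via dpm_wait_for_superior(), while during suspend
 * it waits for its "subordinates" (children and consumers) via
 * dpm_wait_for_subordinate(), mirroring the dependency direction encoded
 * in dpm_list and in the device links walked above.
 */
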
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

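/*
 * Example (illustrative only; the "foo" names are hypothetical): the three
 * selectors above pick their callbacks out of a struct dev_pm_ops that a
 * driver or subsystem provides, e.g.:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 *
 * With that, pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend() and
 * pm_noirq_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume_noirq().
 */
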
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	pr_err("Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

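/*
 * Observability note (assumption: standard kernel facilities): the
 * initcall_debug_*() calls above print per-callback timing when
 * pm_print_times_enabled is set (via /sys/power/pm_print_times), and the
 * device_pm_callback_start/end trace events can be used to follow each
 * callback invocation through ftrace.
 */
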
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device *dev;
	struct task_struct *tsk;
	struct timer_list timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

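/*
 * Configuration note: the watchdog above is compiled in with
 * CONFIG_DPM_WATCHDOG and fires after CONFIG_DPM_WATCHDOG_TIMEOUT seconds
 * (the timer is armed with jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT);
 * it deliberately panics so that a crash dump of the stuck suspend or
 * resume callback is captured.
 */
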
/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - The return value of dev_pm_smart_suspend_and_suspended() if the transition
 *   under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_may_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_smart_suspend_and_suspended(dev);

	return !dev->power.must_resume;
}

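/*
 * Example (hypothetical driver, illustrative only): the skip-resume checks
 * above and in __device_suspend_noirq()/__device_suspend_late() are driven
 * by driver flags that are typically set at probe time, e.g.:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_LEAVE_SUSPENDED);
 *
 * so that a device already in runtime suspend may stay suspended across
 * the system transition.
 */
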
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_may_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback
	 * for this device later, it needs to appear as "suspended" to
	 * PM-runtime, so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is
	 * set to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(func, dev);
		return true;
	}

	return false;
}

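/*
 * Usage note: dpm_async_fn() is the common scheduling helper for all
 * phases; each caller passes its phase-specific worker, e.g.
 * dpm_async_fn(dev, async_resume_noirq).  The reference taken here with
 * get_device() is dropped by the worker's put_device() after the device
 * callback has run.
 */
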
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();

	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_may_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

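/*
 * Sketch of the overall ordering (simplified platform-side pseudocode; see
 * also dpm_suspend_end() below):
 *
 *	dpm_suspend_end(PMSG_SUSPEND);		- "late" and "noirq" suspend
 *	...enter the platform sleep state...
 *	dpm_resume_start(PMSG_RESUME);		- "noirq" and "early" resume
 *
 * while dpm_resume() and dpm_complete() (via dpm_resume_end()) finish the
 * transition once interrupt handlers may run again.
 */
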
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

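/*
 * For example, resume_event(PMSG_SUSPEND) is PMSG_RESUME; the suspend error
 * paths below use this to pick the message passed back to
 * dpm_resume_noirq()/dpm_resume_early() when a partially suspended system
 * has to be unwound.
 */
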
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_smart_suspend_and_suspended(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal.  Also resume them if doing that is not allowed
	 * to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	cpuidle_pause();

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (dev->power.wakeup_path && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

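/*
 * In other words: if a child ends up with power.wakeup_path set during
 * suspend, the flag bubbles up here so that middle layers can keep the
 * parent's wakeup machinery (e.g. an IRQ used for wakeup) functional;
 * parents that set power.ignore_children opt out of this propagation.
 */
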
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		/*
		 * In principle, the resume of the device may be skipped if it
		 * remains in runtime suspend at this point.
		 */
		dev->power.may_skip_resume = true;
		goto Skip;
	}

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

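/*
 * Note: this helper only serves the legacy bus->suspend()/bus->resume()
 * hooks still used by a few old bus types; new code is expected to provide
 * a struct dev_pm_ops through bus->pm instead, which the pm_op() family of
 * selectors above consumes.
 */
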
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || dev->power.wakeup_path)
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = false;
	dev->power.must_resume = false;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

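/*
 * Background note (see device_prepare() below): the direct_complete
 * handling above is armed when a device's ->prepare() callback returns a
 * positive value, which tells the PM core that the device may be left in
 * runtime suspend for the entire transition, so that its suspend and
 * resume callbacks can be skipped.
 */
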
5af84b82
RW
1716static void async_suspend(void *data, async_cookie_t cookie)
1717{
1718 struct device *dev = (struct device *)data;
1719 int error;
1720
1721 error = __device_suspend(dev, pm_transition, true);
2a77c46d
SL
1722 if (error) {
1723 dpm_save_failed_dev(dev_name(dev));
5af84b82 1724 pm_dev_err(dev, pm_transition, " async", error);
2a77c46d 1725 }
5af84b82
RW
1726
1727 put_device(dev);
1728}
1729
1730static int device_suspend(struct device *dev)
1731{
f2a424f6 1732 if (dpm_async_fn(dev, async_suspend))
5af84b82 1733 return 0;
5af84b82
RW
1734
1735 return __device_suspend(dev, pm_transition, false);
1736}
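
/*
 * Illustrative sketch, not part of this file's logic: a driver opting its
 * device into the async path taken by device_suspend() above.  This is
 * normally done once at probe time; foo_probe() is hypothetical, but
 * device_enable_async_suspend() is the real interface for this.
 */
#if 0
static int foo_probe(struct device *dev)
{
	/* Suspend/resume this device concurrently with unrelated devices. */
	device_enable_async_suspend(dev);
	return 0;
}
#endif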

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for the given device.  No new children
 * of the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before
	 * device probing is disabled below.  This synchronization point is
	 * important at least at boot time and when restoring from hibernation.
	 */
	wait_for_device_probe();
	/*
	 * Probing a device during suspend or hibernation is unsafe and makes
	 * system behavior unpredictable, so prohibit device probing here and
	 * defer any new probes instead.  Normal behavior is restored in
	 * dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			pr_info("Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else {
		error = dpm_suspend(state);
	}
	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
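
/*
 * Illustrative sketch, not part of this file's logic: how a platform sleep
 * path typically consumes dpm_suspend_start(), unwinding with
 * dpm_resume_end() when the prepare or suspend phase fails part-way.
 * platform_enter_sleep() is hypothetical.
 */
#if 0
static int platform_enter_sleep(void)
{
	int error = dpm_suspend_start(PMSG_SUSPEND);

	if (error) {
		/* Some devices may have suspended already; roll them back. */
		dpm_resume_end(PMSG_RESUME);
		return error;
	}

	/* ... run the late/noirq phases and enter the sleep state ... */

	dpm_resume_end(PMSG_RESUME);
	return 0;
}
#endif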

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
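
/*
 * Illustrative sketch, not part of this file's logic: an async-capable driver
 * ordering itself against a device outside its ancestry.  During suspend this
 * blocks until the other device's suspend callback has completed.
 * struct baz_priv, priv->companion (saved at probe time) and baz_hw_suspend()
 * are hypothetical.
 */
#if 0
static int baz_suspend(struct device *dev)
{
	struct baz_priv *priv = dev_get_drvdata(dev);
	int error;

	/* Do not touch the hardware before the companion has suspended. */
	error = device_pm_wait_for_dev(dev, priv->companion);
	if (error)
		return error;

	return baz_hw_suspend(priv);
}
#endif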

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
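
/*
 * Illustrative sketch, not part of this file's logic: counting the devices on
 * dpm_list with dpm_for_each_dev().  The callback runs with dpm_list_mtx held
 * (via device_pm_lock()), so it must not try to reacquire that lock.
 */
#if 0
static void count_one_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int count_pm_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, count_one_dev);
	return count;
}
#endif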

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}

bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
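
/*
 * Illustrative sketch, not part of this file's logic: a driver setting
 * DPM_FLAG_SMART_SUSPEND at probe time, which is what makes the check above
 * return true for its device while it is runtime-suspended.  qux_probe() is
 * hypothetical.
 */
#if 0
static int qux_probe(struct device *dev)
{
	/* Let the PM core skip callbacks for an already-suspended device. */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
	return 0;
}
#endif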