/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

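/*
 * RPM_GET_CALLBACK() resolves a runtime PM callback by precedence: a PM
 * domain's ops come first, then the device type's, class's and bus's ops,
 * with the driver's own dev_pm_ops used only as a fallback.  For example,
 * RPM_GET_CALLBACK(dev, runtime_suspend) returns the PM domain's
 * ->runtime_suspend() when a PM domain is attached, even if the driver
 * also provides one.
 */
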
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = dev->power.suspended_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

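/*
 * Illustrative sketch of how a driver typically sets up autosuspend; the
 * delay value and call sites are examples only:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *	...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * With this setup, pm_runtime_autosuspend_expiration() yields the earliest
 * time at which the device may be suspended again.
 */
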
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of its ancestors (or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  The
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI kinds of situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * has been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of
		 * its children set the flag, because the ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

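/*
 * Illustrative sketch of the expected calling pattern in a block or
 * network device driver (the flag must be set after device_add() and
 * cleared before device_del()):
 *
 *	device_add(dev);
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */
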
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
		    link->rpm_active)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		link->rpm_active = true;
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->rpm_active &&
		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
			pm_runtime_put(link->supplier);
			link->rpm_active = false;
		}
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

 fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be part of an iSCSI block device, so the
		 * network device and its ancestors should be marked as
		 * memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification for the device's parent (provided the suspend succeeded
 * and neither parent->power.ignore_children nor dev->power.irq_safe is
 * set).  If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the
 * RPM_AUTO flag is set and the next autosuspend-delay expiration time is
 * in the future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
					dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						ns_to_ktime(expires),
						slack,
						HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and not been
		 * set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

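/*
 * The commonly used helpers in include/linux/pm_runtime.h are thin
 * wrappers around the three entry points above, for example:
 *
 *	pm_runtime_get_sync(dev) == __pm_runtime_resume(dev, RPM_GET_PUT)
 *	pm_runtime_put(dev)      == __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev) == __pm_runtime_idle(dev, RPM_GET_PUT)
 */
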
/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);

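/*
 * Illustrative sketch: pm_runtime_get_if_in_use() lets a hot path touch
 * the device only when doing so cannot trigger a resume:
 *
 *	if (pm_runtime_get_if_in_use(dev) > 0) {
 *		... access the device, it is guaranteed to stay active ...
 *		pm_runtime_put(dev);
 *	}
 */
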
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

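/*
 * Illustrative sketch: a driver handing an already-powered device over to
 * runtime PM typically sets the status before enabling it in probe();
 * pm_runtime_set_active() is a wrapper around
 * __pm_runtime_set_status(dev, RPM_ACTIVE):
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */
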
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth--;

		/* About to enable runtime PM, set accounting_timestamp to now */
		if (!dev->power.disable_depth)
			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
	} else {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
	}

	WARN(!dev->power.disable_depth &&
	     dev->power.runtime_status == RPM_SUSPENDED &&
	     !dev->power.ignore_children &&
	     atomic_read(&dev->power.child_count) > 0,
	     "Enabling runtime PM for inactive device (%s) with active children\n",
	     dev_name(dev));

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

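/*
 * Illustrative sketch: a driver whose runtime PM callbacks may be invoked
 * from interrupt context declares that before enabling runtime PM:
 *
 *	pm_runtime_irq_safe(dev);
 *	pm_runtime_enable(dev);
 *
 * Note that this pins the parent in the runtime-active state for good.
 */
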
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (once per link).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 *
 * Since the device is guaranteed to be runtime-active at the point this is
 * called, nothing else needs to be done here.
 *
 * Moreover, this is called after device_links_busy() has returned 'false', so
 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
 * therefore rpm_active can't be manipulated concurrently.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->rpm_active) {
			pm_runtime_put_noidle(dev);
			link->rpm_active = false;
		}
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_get_sync(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_put(link->supplier);

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

void pm_runtime_drop_link(struct device *dev)
{
	rpm_put_suppliers(dev);

	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low power state.  It should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		__update_runtime_status(dev, RPM_SUSPENDED);

	return 0;

err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into a low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device back to full power, if it is
 * expected to be used on system resume.  Otherwise, we defer the resume to be
 * managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
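
/*
 * Illustrative sketch: drivers without dedicated system sleep handling
 * often reuse the two helpers above for system-wide transitions; foo_*
 * are hypothetical driver callbacks:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   NULL)
 *	};
 */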