/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers, to solve the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL happens inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or of
 * the block device itself), a deadlock may be triggered inside the
 * memory allocation, since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  The
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI-style situations.
 *
 * The dev_hotplug_mutex lock is held in the function to handle
 * hotplug races, because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable the ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of its
		 * children set the flag, because an ancestor's flag may have
		 * been set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

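/*
 * Illustrative sketch (not part of this file): per the comment above, a
 * hypothetical block driver would set the flag right after registering its
 * device and clear it again before unregistering:
 *
 *	ret = device_add(&foo->dev);
 *	if (ret)
 *		return ret;
 *	pm_runtime_set_memalloc_noio(&foo->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 *
 * "foo" is a made-up driver structure used only for illustration.
 */
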
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
		    link->rpm_active)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		if (retval < 0) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		link->rpm_active = true;
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->rpm_active &&
		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
			pm_runtime_put(link->supplier);
			link->rpm_active = false;
		}
}

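/*
 * Illustrative sketch (not part of this file): for the supplier handling
 * above to kick in, a consumer driver must have created a device link with
 * runtime PM semantics, typically at probe time:
 *
 *	link = device_link_add(consumer_dev, supplier_dev, DL_FLAG_PM_RUNTIME);
 *	if (!link)
 *		return -EINVAL;
 *
 * "consumer_dev" and "supplier_dev" are placeholder names.  With such a link
 * in place, runtime-resuming the consumer bumps the supplier's usage counter
 * via rpm_get_suppliers(), and suspending it drops the counter again.
 */
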
/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval)) {
			idx = device_links_read_lock();

 fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or of the block device itself.  A network
		 * device might be part of an iSCSI block device, so a
		 * network device and its ancestors should be marked as
		 * memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  When
 * ->runtime_suspend() succeeds, if a deferred resume was requested while
 * the callback was running then carry it out; otherwise send an idle
 * notification for the device's parent (provided the suspend succeeded
 * and neither parent->power.ignore_children nor dev->power.irq_safe is
 * set).  If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the
 * RPM_AUTO flag is set and the next autosuspend-delay expiration time is
 * in the future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred_resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

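/*
 * Illustrative sketch (not part of this file): a driver that expects its
 * device to stay idle can request a future suspend attempt instead of
 * suspending synchronously, e.g. from an I/O completion path:
 *
 *	pm_schedule_suspend(dev, 500);	(try to suspend in about 500 ms)
 *
 * The suspend request itself is queued from the timer function, so unlike a
 * synchronous suspend this call never blocks waiting for the callback.
 */
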
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

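/*
 * For orientation (a summary, not a definition): the static inline helpers in
 * include/linux/pm_runtime.h funnel into this entry point, roughly as:
 *
 *	pm_runtime_idle(dev)     -> __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)     -> __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev)      -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev) -> __pm_runtime_idle(dev, RPM_GET_PUT)
 */
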
/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

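/*
 * Illustrative sketch (not part of this file): the canonical way for a driver
 * to bracket hardware accesses with these entry points is through the
 * pm_runtime_get_sync()/pm_runtime_put() helpers:
 *
 *	ret = pm_runtime_get_sync(dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);
 *		return ret;
 *	}
 *	... access the hardware ...
 *	pm_runtime_put(dev);
 *
 * The put_noidle() in the error path keeps the usage count balanced even
 * though the resume failed.
 */
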
/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);

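/*
 * Illustrative sketch (not part of this file): this helper suits
 * opportunistic work that is only worth doing while the device is already
 * awake and in use:
 *
 *	if (pm_runtime_get_if_in_use(dev) > 0) {
 *		... touch the hardware without forcing a resume ...
 *		pm_runtime_put(dev);
 *	}
 *
 * A return of 0 or -EINVAL means no reference was taken, so no put is needed.
 */
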
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

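/*
 * Illustrative sketch (not part of this file): the usual caller of this
 * routine is a driver's probe path, via the pm_runtime_set_active() and
 * pm_runtime_set_suspended() wrappers, to tell the core what state the
 * hardware is actually in before enabling runtime PM:
 *
 *	foo_power_up(foo);			(hypothetical hardware init)
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * As the code above shows, setting the status while runtime PM is enabled
 * normally fails with -EAGAIN (unless a runtime error is pending), which is
 * why the status must be set before pm_runtime_enable() is called.
 */
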
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

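/*
 * Informational note (not part of the original file): these two functions
 * back the per-device power/control attribute in sysfs; writing "on" to it
 * ends up in pm_runtime_forbid() and writing "auto" in pm_runtime_allow().
 * Some subsystems (the PCI core, for instance) also call pm_runtime_forbid()
 * directly so that runtime suspend stays opt-in from user space.
 */
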
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

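/*
 * Illustrative sketch (not part of this file): the usual autosuspend pattern
 * in a driver combines the two setters above at probe time with the
 * mark-last-busy/put-autosuspend pair in the I/O path:
 *
 *	probe:
 *		pm_runtime_set_autosuspend_delay(dev, 2000);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_enable(dev);
 *
 *	after each I/O burst:
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *
 * The device is then suspended only after it has been idle for at least the
 * configured delay (2 s here), instead of immediately on the last put.
 */
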
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (once per link).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 *
 * Since the device is guaranteed to be runtime-active at the point this is
 * called, nothing else needs to be done here.
 *
 * Moreover, this is called after device_links_busy() has returned 'false', so
 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
 * therefore rpm_active can't be manipulated concurrently.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->rpm_active) {
			pm_runtime_put_noidle(dev);
			link->rpm_active = false;
		}
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_get_sync(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_put(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend() callback to bring it
 * into suspend state.  Keep runtime PM disabled to preserve the state unless
 * we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	if (!callback) {
		ret = -ENOSYS;
		goto err;
	}

	ret = callback(dev);
	if (ret)
		goto err;

	pm_runtime_set_suspended(dev);
	return 0;
err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device back to full power.  We also
 * update the runtime PM status and re-enable runtime PM.
 *
 * Typically this function may be invoked from a system resume callback to
 * make sure the device is put into full power state.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	if (!callback) {
		ret = -ENOSYS;
		goto out;
	}

	if (!pm_runtime_status_suspended(dev))
		goto out;

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto out;

	ret = callback(dev);
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
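
/*
 * Illustrative sketch (not part of this file): a driver whose system sleep
 * handling is identical to its runtime PM handling can reuse the two helpers
 * above directly in its dev_pm_ops:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   NULL)
 *	};
 *
 * "foo_runtime_suspend"/"foo_runtime_resume" are hypothetical driver
 * callbacks; the SET_*_PM_OPS macros are the standard ones from linux/pm.h.
 */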