/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies_up(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
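
/*
 * Illustrative sketch, not part of this file: how a driver typically feeds
 * power.last_busy, which the expiration time computed above is based on.
 * The foo_ names are hypothetical; the helpers are the static inlines from
 * <linux/pm_runtime.h>.
 */
#if 0
static void foo_io_complete(struct device *dev)
{
	/* Record the time of last activity for the autosuspend logic. */
	pm_runtime_mark_last_busy(dev);
	/*
	 * Drop our usage count; if it reaches zero, an autosuspend is
	 * scheduled for the time that pm_runtime_autosuspend_expiration()
	 * computes.
	 */
	pm_runtime_put_autosuspend(dev);
}
#endif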

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (callback)
		__rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval;
}
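
/*
 * Illustrative sketch, not part of this file: a minimal ->runtime_idle()
 * callback of the kind rpm_idle() looks up and invokes above.  A common
 * pattern is to simply request an immediate suspend; foo_runtime_idle() and
 * its driver are hypothetical.
 */
#if 0
static int foo_runtime_idle(struct device *dev)
{
	/* No housekeeping needed; ask the core to suspend the device now. */
	pm_runtime_suspend(dev);
	return 0;
}
#endif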

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	retval = __rpm_callback(cb, dev);

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification to the device's parent (provided the suspend succeeded and
 * neither parent->power.ignore_children nor dev->power.irq_safe is set).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
		wake_up_all(&dev->power.wait_queue);
		goto out;
	}

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
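
/*
 * Illustrative sketch, not part of this file: a minimal ->runtime_suspend()
 * callback of the kind rpm_suspend() invokes.  Returning -EBUSY or -EAGAIN
 * vetoes the suspend without leaving the device in the error state, per the
 * retval handling above.  The foo_hw_* helpers are hypothetical.
 */
#if 0
static int foo_runtime_suspend(struct device *dev)
{
	if (foo_hw_still_busy(dev))
		return -EBUSY;	/* Try again later; not a hard error. */

	foo_hw_power_down(dev);
	return 0;
}
#endif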

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
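
/*
 * Illustrative sketch, not part of this file: deferring a suspend attempt
 * instead of suspending synchronously.  The 500 ms grace period and the
 * foo_ name are hypothetical.
 */
#if 0
static void foo_io_done(struct device *dev)
{
	/* A nonzero return means no suspend request was queued. */
	if (pm_schedule_suspend(dev, 500))
		dev_dbg(dev, "runtime suspend not scheduled\n");
}
#endif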

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
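
/*
 * Illustrative sketch, not part of this file: the usual driver-side pairing
 * around these entry points, via the static inline helpers from
 * <linux/pm_runtime.h>.  foo_start_io() is hypothetical.
 */
#if 0
static int foo_start_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* __pm_runtime_resume(dev, RPM_GET_PUT) */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* keep the usage count balanced */
		return ret;
	}

	/* ... the device is RPM_ACTIVE here; carry out the I/O ... */

	pm_runtime_put(dev);	/* async idle notification on the last put */
	return 0;
}
#endif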

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
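
/*
 * Illustrative sketch, not part of this file: a hypothetical probe() telling
 * the core that the hardware is already powered before enabling runtime PM,
 * so the status starts out as RPM_ACTIVE rather than the default
 * RPM_SUSPENDED.  pm_runtime_set_active() wraps __pm_runtime_set_status().
 */
#if 0
static int foo_probe(struct device *dev)
{
	pm_runtime_set_active(dev);	/* may fail with -EBUSY, see above */
	pm_runtime_enable(dev);
	return 0;
}
#endif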

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
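
/*
 * Illustrative sketch, not part of this file: disable_depth is a counter, so
 * disable/enable calls must balance.  A hypothetical driver bracketing a
 * hardware reset to keep runtime PM callbacks out of the critical section:
 */
#if 0
static void foo_reset(struct device *dev)
{
	pm_runtime_disable(dev);	/* waits out PM operations in progress */
	/* ... reprogram the hardware without racing runtime PM ... */
	pm_runtime_enable(dev);
}
#endif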

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
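
/*
 * Illustrative sketch, not part of this file: once pm_runtime_irq_safe() has
 * been called, the synchronous entry points become legal in atomic context.
 * The foo_ names are hypothetical; <linux/interrupt.h> is assumed for
 * irqreturn_t.
 */
#if 0
static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* legal here only because irq_safe is set */
	/* ... handle the interrupt ... */
	pm_runtime_put(dev);
	return IRQ_HANDLED;
}
#endif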

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
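
/*
 * Illustrative sketch, not part of this file: typical probe-time autosuspend
 * setup via the inline wrappers.  The 2000 ms delay and the foo_ name are
 * hypothetical.
 */
#if 0
static int foo_autosuspend_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_use_autosuspend(dev);	/* __pm_runtime_use_autosuspend(dev, true) */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
#endif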

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}