/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
#include "power.h"
typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}
#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
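
/*
 * Illustrative sketch, not part of this file: the lookup above prefers the PM
 * domain, then type/class/bus ops, and falls back to the driver's own
 * dev_pm_ops.  A driver would typically provide the callbacks like this; the
 * "foo" names are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.pm	= &foo_pm_ops,
 *		},
 *	};
 */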
/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;
	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
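
/*
 * Illustrative sketch, not part of this file: a driver feeds the expiration
 * logic above by updating power.last_busy after each transfer and dropping
 * its usage count with the autosuspend variant of put; "foo_xfer_done" is a
 * hypothetical helper:
 *
 *	static void foo_xfer_done(struct device *dev)
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *	}
 */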
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}
/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of its ancestors (or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  The
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI kinds of situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * has been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * clear flag of the parent device only if all the
		 * children don't set the flag because ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
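
/*
 * Illustrative sketch, not part of this file: a block device driver would set
 * the flag right after device_add() and clear it again before device_del();
 * "foo_add_disk_dev" and its error handling are hypothetical:
 *
 *	static int foo_add_disk_dev(struct device *dev)
 *	{
 *		int error = device_add(dev);
 *
 *		if (error)
 *			return error;
 *
 *		pm_runtime_set_memalloc_noio(dev, true);
 *		return 0;
 *	}
 */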
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}
/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}
/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be thought of as part of an iSCSI block
		 * device, so a network device and its ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}
/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are unset).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);
			cpu_relax();
			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);
			schedule();
			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}
/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);
			cpu_relax();
			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);
			schedule();
			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and not been
		 * set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}
/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
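
/*
 * Illustrative sketch, not part of this file: a driver that wants a fixed
 * grace period instead of the autosuspend machinery can drop its usage count
 * without an idle notification and schedule the suspend itself; the 500 ms
 * delay and "foo_release" are hypothetical:
 *
 *	static void foo_release(struct device *dev)
 *	{
 *		pm_runtime_put_noidle(dev);
 *		pm_schedule_suspend(dev, 500);
 *	}
 */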
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
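
/*
 * Illustrative sketch, not part of this file: the usual driver-facing pattern
 * built on these entry points brackets each I/O operation with a synchronous
 * get and an asynchronous put; "foo_transfer" is hypothetical:
 *
 *	static int foo_transfer(struct device *dev)
 *	{
 *		int ret = pm_runtime_get_sync(dev);
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		// ... program the hardware while it is RPM_ACTIVE ...
 *		pm_runtime_put(dev);
 *		return 0;
 *	}
 */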
/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
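
/*
 * Illustrative sketch, not part of this file: this helper suits opportunistic
 * work that should only touch the hardware if the device is already in use;
 * "foo_poll_stats" is hypothetical:
 *
 *	static void foo_poll_stats(struct device *dev)
 *	{
 *		if (pm_runtime_get_if_in_use(dev) <= 0)
 *			return;		// disabled, suspended or unused
 *		// ... read counters from the active device ...
 *		pm_runtime_put(dev);
 *	}
 */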
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/*
		 * It is invalid to suspend a device with an active child,
		 * unless it has been set to ignore its children.
		 */
		if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count)) {
			dev_err(dev, "runtime PM trying to suspend device but active child\n");
			error = -EBUSY;
			goto out;
		}

		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
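
/*
 * Illustrative sketch, not part of this file: a driver whose hardware is
 * already powered up at probe time (e.g. by the bootloader) tells the core so
 * through the pm_runtime_set_active() wrapper before enabling runtime PM;
 * "foo_probe" is hypothetical:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		int error = pm_runtime_set_active(dev);
 *
 *		if (error)
 *			return error;
 *
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */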
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
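
/*
 * Illustrative sketch, not part of this file: once a device has been marked
 * irq-safe, synchronous runtime PM calls become legal in atomic context;
 * the "foo" interrupt handler is hypothetical:
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		pm_runtime_get_sync(dev);	// callbacks run under the spinlock
 *		// ... handle the interrupt ...
 *		pm_runtime_put(dev);
 *		return IRQ_HANDLED;
 *	}
 */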
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}
/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
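
/*
 * Illustrative sketch, not part of this file: typical probe-time setup of the
 * autosuspend machinery configured by the two helpers above; the 2000 ms
 * delay and "foo_probe" are hypothetical:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_set_autosuspend_delay(dev, 2000);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */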
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}
/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to bring it into
 * suspend state.  Keep runtime PM disabled to preserve the state unless we
 * encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to make
 * sure the device is put into a low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	if (!callback) {
		ret = -ENOSYS;
		goto err;
	}

	ret = callback(dev);
	if (ret)
		goto err;

	/*
	 * Increase the runtime PM usage count for the device's parent, in case
	 * when we find the device being used when system suspend was invoked.
	 * This informs pm_runtime_force_resume() to resume the parent
	 * immediately, which is needed to be able to resume its children,
	 * when not deferring the resume to be managed via runtime PM.
	 */
	if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
		pm_runtime_get_noresume(dev->parent);

	pm_runtime_set_suspended(dev);
	return 0;
err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into a low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device back to full power, if it is
 * expected to be used on system resume.  To distinguish that, we check whether
 * the runtime PM usage count is greater than 1 (the PM core increases the
 * usage count in the system PM prepare phase), as that indicates a real user
 * (such as a subsystem, driver, userspace, etc.) is using it.  If that is the
 * case, the device is expected to be used on system resume as well, so then we
 * resume it.  In the other case, we defer the resume to be managed via runtime
 * PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	if (!callback) {
		ret = -ENOSYS;
		goto out;
	}

	if (!pm_runtime_status_suspended(dev))
		goto out;

	/*
	 * Decrease the parent's runtime PM usage count, if we increased it
	 * during system suspend in pm_runtime_force_suspend().
	 */
	if (atomic_read(&dev->power.usage_count) > 1) {
		if (dev->parent)
			pm_runtime_put_noidle(dev->parent);
	} else {
		goto out;
	}

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto out;

	ret = callback(dev);
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
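
/*
 * Illustrative sketch, not part of this file: drivers whose system sleep
 * handling is identical to their runtime PM handling can wire the two helpers
 * above straight into their dev_pm_ops; the "foo" callbacks are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 */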