1 /*
2  * drivers/base/power/runtime.c - Helper functions for device runtime PM
3  *
4  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
6  *
7  * This file is released under the GPLv2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <trace/events/rpm.h>
15 #include "power.h"
16
17 typedef int (*pm_callback_t)(struct device *);
18
19 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
20 {
21         pm_callback_t cb;
22         const struct dev_pm_ops *ops;
23
24         if (dev->pm_domain)
25                 ops = &dev->pm_domain->ops;
26         else if (dev->type && dev->type->pm)
27                 ops = dev->type->pm;
28         else if (dev->class && dev->class->pm)
29                 ops = dev->class->pm;
30         else if (dev->bus && dev->bus->pm)
31                 ops = dev->bus->pm;
32         else
33                 ops = NULL;
34
35         if (ops)
36                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
37         else
38                 cb = NULL;
39
40         if (!cb && dev->driver && dev->driver->pm)
41                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
42
43         return cb;
44 }
45
46 #define RPM_GET_CALLBACK(dev, callback) \
47                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
48
49 static int rpm_resume(struct device *dev, int rpmflags);
50 static int rpm_suspend(struct device *dev, int rpmflags);
51
52 /**
53  * update_pm_runtime_accounting - Update the time accounting of power states
54  * @dev: Device to update the accounting for
55  *
56  * In order to be able to have time accounting of the various power states
57  * (as used by programs such as PowerTOP to show the effectiveness of runtime
58  * PM), we need to track the time spent in each state.
59  * update_pm_runtime_accounting must be called each time before the
60  * runtime_status field is updated, to account the time in the old state
61  * correctly.
62  */
63 void update_pm_runtime_accounting(struct device *dev)
64 {
65         unsigned long now = jiffies;
66         unsigned long delta;
67
68         delta = now - dev->power.accounting_timestamp;
69
70         dev->power.accounting_timestamp = now;
71
72         if (dev->power.disable_depth > 0)
73                 return;
74
75         if (dev->power.runtime_status == RPM_SUSPENDED)
76                 dev->power.suspended_jiffies += delta;
77         else
78                 dev->power.active_jiffies += delta;
79 }
80
81 static void __update_runtime_status(struct device *dev, enum rpm_status status)
82 {
83         update_pm_runtime_accounting(dev);
84         dev->power.runtime_status = status;
85 }
86
87 /**
88  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
89  * @dev: Device to handle.
90  */
91 static void pm_runtime_deactivate_timer(struct device *dev)
92 {
93         if (dev->power.timer_expires > 0) {
94                 del_timer(&dev->power.suspend_timer);
95                 dev->power.timer_expires = 0;
96         }
97 }
98
99 /**
100  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
101  * @dev: Device to handle.
102  */
103 static void pm_runtime_cancel_pending(struct device *dev)
104 {
105         pm_runtime_deactivate_timer(dev);
106         /*
107          * In case there's a request pending, make sure its work function will
108          * return without doing anything.
109          */
110         dev->power.request = RPM_REQ_NONE;
111 }
112
113 /**
114  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
115  * @dev: Device to handle.
116  *
117  * Compute the autosuspend-delay expiration time based on the device's
118  * power.last_busy time.  If the delay has already expired or is disabled
119  * (negative) or the power.use_autosuspend flag isn't set, return 0.
120  * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
121  *
122  * This function may be called either with or without dev->power.lock held.
123  * Either way it can be racy, since power.last_busy may be updated at any time.
124  */
125 unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
126 {
127         int autosuspend_delay;
128         long elapsed;
129         unsigned long last_busy;
130         unsigned long expires = 0;
131
132         if (!dev->power.use_autosuspend)
133                 goto out;
134
135         autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
136         if (autosuspend_delay < 0)
137                 goto out;
138
139         last_busy = ACCESS_ONCE(dev->power.last_busy);
140         elapsed = jiffies - last_busy;
141         if (elapsed < 0)
142                 goto out;       /* jiffies has wrapped around. */
143
144         /*
145          * If the autosuspend_delay is >= 1 second, align the timer by rounding
146          * up to the nearest second.
147          */
148         expires = last_busy + msecs_to_jiffies(autosuspend_delay);
149         if (autosuspend_delay >= 1000)
150                 expires = round_jiffies(expires);
151         expires += !expires;
152         if (elapsed >= expires - last_busy)
153                 expires = 0;    /* Already expired. */
154
155  out:
156         return expires;
157 }
158 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
159
160 static int dev_memalloc_noio(struct device *dev, void *data)
161 {
162         return dev->power.memalloc_noio;
163 }
164
165 /**
166  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
167  * @dev: Device to handle.
168  * @enable: True for setting the flag and False for clearing the flag.
169  *
170  * Set the flag for all devices in the path from the device to the
171  * root device in the device tree if @enable is true, otherwise clear
172  * the flag for devices in the path whose siblings don't set the flag.
173  *
174  * This function should only be called by block device or network
175  * device drivers to solve the deadlock problem during runtime
176  * resume/suspend:
177  *
178  *     If a memory allocation with GFP_KERNEL is made inside the runtime
179  *     resume/suspend callback of any one of the device's ancestors (or of
180  *     the block device itself), a deadlock may be triggered inside the
181  *     memory allocation, since it might not complete until the block
182  *     device becomes active and the involved page I/O finishes.  This
183  *     situation was first pointed out by Alan Stern.  Network devices
184  *     are involved in the iSCSI kind of situation.
185  *
186  * The dev_hotplug_mutex lock is held in this function to handle the
187  * hotplug race, because pm_runtime_set_memalloc_noio() may be called
188  * from an async probe().
189  *
190  * The function should be called between device_add() and device_del()
191  * on the affected device (block/network device).
192  */
193 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
194 {
195         static DEFINE_MUTEX(dev_hotplug_mutex);
196
197         mutex_lock(&dev_hotplug_mutex);
198         for (;;) {
199                 bool enabled;
200
201                 /* hold power lock since bitfield is not SMP-safe. */
202                 spin_lock_irq(&dev->power.lock);
203                 enabled = dev->power.memalloc_noio;
204                 dev->power.memalloc_noio = enable;
205                 spin_unlock_irq(&dev->power.lock);
206
207                 /*
208                  * No need to set the flag on the ancestors if the device's
209                  * flag was already set.
210                  */
211                 if (enabled && enable)
212                         break;
213
214                 dev = dev->parent;
215
216                 /*
217                  * Clear the parent's flag only if none of its children
218                  * have the flag set, because an ancestor's flag may have
219                  * been set by any one of its descendants.
220                  */
221                 if (!dev || (!enable &&
222                              device_for_each_child(dev, NULL,
223                                                    dev_memalloc_noio)))
224                         break;
225         }
226         mutex_unlock(&dev_hotplug_mutex);
227 }
228 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
229
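/*
 * Illustrative sketch, not part of this file: how a block or network
 * device driver might use pm_runtime_set_memalloc_noio().  The foo
 * pointer is hypothetical; only the pm_runtime_set_memalloc_noio() and
 * device_add()/device_del() calls are the real API.  The flag is set
 * after device_add() and cleared before device_del(), as required above.
 *
 *	ret = device_add(&foo->dev);
 *	if (ret)
 *		return ret;
 *	pm_runtime_set_memalloc_noio(&foo->dev, true);
 *
 *	...
 *
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 */
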
230 /**
231  * rpm_check_suspend_allowed - Test whether a device may be suspended.
232  * @dev: Device to test.
233  */
234 static int rpm_check_suspend_allowed(struct device *dev)
235 {
236         int retval = 0;
237
238         if (dev->power.runtime_error)
239                 retval = -EINVAL;
240         else if (dev->power.disable_depth > 0)
241                 retval = -EACCES;
242         else if (atomic_read(&dev->power.usage_count) > 0)
243                 retval = -EAGAIN;
244         else if (!dev->power.ignore_children &&
245                         atomic_read(&dev->power.child_count))
246                 retval = -EBUSY;
247
248         /* Pending resume requests take precedence over suspends. */
249         else if ((dev->power.deferred_resume
250                         && dev->power.runtime_status == RPM_SUSPENDING)
251             || (dev->power.request_pending
252                         && dev->power.request == RPM_REQ_RESUME))
253                 retval = -EAGAIN;
254         else if (__dev_pm_qos_read_value(dev) < 0)
255                 retval = -EPERM;
256         else if (dev->power.runtime_status == RPM_SUSPENDED)
257                 retval = 1;
258
259         return retval;
260 }
261
262 /**
263  * __rpm_callback - Run a given runtime PM callback for a given device.
264  * @cb: Runtime PM callback to run.
265  * @dev: Device to run the callback for.
266  */
267 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
268         __releases(&dev->power.lock) __acquires(&dev->power.lock)
269 {
270         int retval;
271
272         if (dev->power.irq_safe)
273                 spin_unlock(&dev->power.lock);
274         else
275                 spin_unlock_irq(&dev->power.lock);
276
277         retval = cb(dev);
278
279         if (dev->power.irq_safe)
280                 spin_lock(&dev->power.lock);
281         else
282                 spin_lock_irq(&dev->power.lock);
283
284         return retval;
285 }
286
287 /**
288  * rpm_idle - Notify device bus type if the device can be suspended.
289  * @dev: Device to notify the bus type about.
290  * @rpmflags: Flag bits.
291  *
292  * Check if the device's runtime PM status allows it to be suspended.  If
293  * another idle notification has been started earlier, return immediately.  If
294  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
295  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
296  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
297  *
298  * This function must be called under dev->power.lock with interrupts disabled.
299  */
300 static int rpm_idle(struct device *dev, int rpmflags)
301 {
302         int (*callback)(struct device *);
303         int retval;
304
305         trace_rpm_idle_rcuidle(dev, rpmflags);
306         retval = rpm_check_suspend_allowed(dev);
307         if (retval < 0)
308                 ;       /* Conditions are wrong. */
309
310         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
311         else if (dev->power.runtime_status != RPM_ACTIVE)
312                 retval = -EAGAIN;
313
314         /*
315          * Any pending request other than an idle notification takes
316          * precedence over us, except that the timer may be running.
317          */
318         else if (dev->power.request_pending &&
319             dev->power.request > RPM_REQ_IDLE)
320                 retval = -EAGAIN;
321
322         /* Act as though RPM_NOWAIT is always set. */
323         else if (dev->power.idle_notification)
324                 retval = -EINPROGRESS;
325         if (retval)
326                 goto out;
327
328         /* Pending requests need to be canceled. */
329         dev->power.request = RPM_REQ_NONE;
330
331         if (dev->power.no_callbacks)
332                 goto out;
333
334         /* Carry out an asynchronous or a synchronous idle notification. */
335         if (rpmflags & RPM_ASYNC) {
336                 dev->power.request = RPM_REQ_IDLE;
337                 if (!dev->power.request_pending) {
338                         dev->power.request_pending = true;
339                         queue_work(pm_wq, &dev->power.work);
340                 }
341                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
342                 return 0;
343         }
344
345         dev->power.idle_notification = true;
346
347         callback = RPM_GET_CALLBACK(dev, runtime_idle);
348
349         if (callback)
350                 retval = __rpm_callback(callback, dev);
351
352         dev->power.idle_notification = false;
353         wake_up_all(&dev->power.wait_queue);
354
355  out:
356         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
357         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
358 }
359
360 /**
361  * rpm_callback - Run a given runtime PM callback for a given device.
362  * @cb: Runtime PM callback to run.
363  * @dev: Device to run the callback for.
364  */
365 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
366 {
367         int retval;
368
369         if (!cb)
370                 return -ENOSYS;
371
372         if (dev->power.memalloc_noio) {
373                 unsigned int noio_flag;
374
375                 /*
376                  * A deadlock might be caused if a memory allocation with
377                  * GFP_KERNEL happens inside the runtime_suspend or
378                  * runtime_resume callback of a block device's ancestor
379                  * or of the block device itself.  A network device might
380                  * be regarded as part of an iSCSI block device, so the
381                  * network device and its ancestors should be marked as
382                  * memalloc_noio too.
383                  */
384                 noio_flag = memalloc_noio_save();
385                 retval = __rpm_callback(cb, dev);
386                 memalloc_noio_restore(noio_flag);
387         } else {
388                 retval = __rpm_callback(cb, dev);
389         }
390
391         dev->power.runtime_error = retval;
392         return retval != -EACCES ? retval : -EIO;
393 }
394
395 /**
396  * rpm_suspend - Carry out runtime suspend of given device.
397  * @dev: Device to suspend.
398  * @rpmflags: Flag bits.
399  *
400  * Check if the device's runtime PM status allows it to be suspended.
401  * Cancel a pending idle notification, autosuspend or suspend. If
402  * another suspend has been started earlier, either return immediately
403  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
404  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
405  * otherwise run the ->runtime_suspend() callback directly. If
406  * ->runtime_suspend() succeeded and a deferred resume was requested while
407  * the callback was running, carry it out; otherwise send an idle
408  * notification for the device's parent (if the suspend succeeded and both
409  * ignore_children of parent->power and irq_safe of dev->power are not set).
410  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
411  * flag is set and the next autosuspend-delay expiration time is in the
412  * future, schedule another autosuspend attempt.
413  *
414  * This function must be called under dev->power.lock with interrupts disabled.
415  */
416 static int rpm_suspend(struct device *dev, int rpmflags)
417         __releases(&dev->power.lock) __acquires(&dev->power.lock)
418 {
419         int (*callback)(struct device *);
420         struct device *parent = NULL;
421         int retval;
422
423         trace_rpm_suspend_rcuidle(dev, rpmflags);
424
425  repeat:
426         retval = rpm_check_suspend_allowed(dev);
427
428         if (retval < 0)
429                 ;       /* Conditions are wrong. */
430
431         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
432         else if (dev->power.runtime_status == RPM_RESUMING &&
433             !(rpmflags & RPM_ASYNC))
434                 retval = -EAGAIN;
435         if (retval)
436                 goto out;
437
438         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
439         if ((rpmflags & RPM_AUTO)
440             && dev->power.runtime_status != RPM_SUSPENDING) {
441                 unsigned long expires = pm_runtime_autosuspend_expiration(dev);
442
443                 if (expires != 0) {
444                         /* Pending requests need to be canceled. */
445                         dev->power.request = RPM_REQ_NONE;
446
447                         /*
448                          * Optimization: If the timer is already running and is
449                          * set to expire at or before the autosuspend delay,
450                          * avoid the overhead of resetting it.  Just let it
451                          * expire; pm_suspend_timer_fn() will take care of the
452                          * rest.
453                          */
454                         if (!(dev->power.timer_expires && time_before_eq(
455                             dev->power.timer_expires, expires))) {
456                                 dev->power.timer_expires = expires;
457                                 mod_timer(&dev->power.suspend_timer, expires);
458                         }
459                         dev->power.timer_autosuspends = 1;
460                         goto out;
461                 }
462         }
463
464         /* Other scheduled or pending requests need to be canceled. */
465         pm_runtime_cancel_pending(dev);
466
467         if (dev->power.runtime_status == RPM_SUSPENDING) {
468                 DEFINE_WAIT(wait);
469
470                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
471                         retval = -EINPROGRESS;
472                         goto out;
473                 }
474
475                 if (dev->power.irq_safe) {
476                         spin_unlock(&dev->power.lock);
477
478                         cpu_relax();
479
480                         spin_lock(&dev->power.lock);
481                         goto repeat;
482                 }
483
484                 /* Wait for the other suspend running in parallel with us. */
485                 for (;;) {
486                         prepare_to_wait(&dev->power.wait_queue, &wait,
487                                         TASK_UNINTERRUPTIBLE);
488                         if (dev->power.runtime_status != RPM_SUSPENDING)
489                                 break;
490
491                         spin_unlock_irq(&dev->power.lock);
492
493                         schedule();
494
495                         spin_lock_irq(&dev->power.lock);
496                 }
497                 finish_wait(&dev->power.wait_queue, &wait);
498                 goto repeat;
499         }
500
501         if (dev->power.no_callbacks)
502                 goto no_callback;       /* Assume success. */
503
504         /* Carry out an asynchronous or a synchronous suspend. */
505         if (rpmflags & RPM_ASYNC) {
506                 dev->power.request = (rpmflags & RPM_AUTO) ?
507                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
508                 if (!dev->power.request_pending) {
509                         dev->power.request_pending = true;
510                         queue_work(pm_wq, &dev->power.work);
511                 }
512                 goto out;
513         }
514
515         __update_runtime_status(dev, RPM_SUSPENDING);
516
517         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
518
519         dev_pm_enable_wake_irq(dev);
520         retval = rpm_callback(callback, dev);
521         if (retval)
522                 goto fail;
523
524  no_callback:
525         __update_runtime_status(dev, RPM_SUSPENDED);
526         pm_runtime_deactivate_timer(dev);
527
528         if (dev->parent) {
529                 parent = dev->parent;
530                 atomic_add_unless(&parent->power.child_count, -1, 0);
531         }
532         wake_up_all(&dev->power.wait_queue);
533
534         if (dev->power.deferred_resume) {
535                 dev->power.deferred_resume = false;
536                 rpm_resume(dev, 0);
537                 retval = -EAGAIN;
538                 goto out;
539         }
540
541         /* Maybe the parent is now able to suspend. */
542         if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
543                 spin_unlock(&dev->power.lock);
544
545                 spin_lock(&parent->power.lock);
546                 rpm_idle(parent, RPM_ASYNC);
547                 spin_unlock(&parent->power.lock);
548
549                 spin_lock(&dev->power.lock);
550         }
551
552  out:
553         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
554
555         return retval;
556
557  fail:
558         dev_pm_disable_wake_irq(dev);
559         __update_runtime_status(dev, RPM_ACTIVE);
560         dev->power.deferred_resume = false;
561         wake_up_all(&dev->power.wait_queue);
562
563         if (retval == -EAGAIN || retval == -EBUSY) {
564                 dev->power.runtime_error = 0;
565
566                 /*
567                  * If the callback routine failed an autosuspend, and
568                  * if the last_busy time has been updated so that there
569                  * is a new autosuspend expiration time, automatically
570                  * reschedule another autosuspend.
571                  */
572                 if ((rpmflags & RPM_AUTO) &&
573                     pm_runtime_autosuspend_expiration(dev) != 0)
574                         goto repeat;
575         } else {
576                 pm_runtime_cancel_pending(dev);
577         }
578         goto out;
579 }
580
581 /**
582  * rpm_resume - Carry out runtime resume of given device.
583  * @dev: Device to resume.
584  * @rpmflags: Flag bits.
585  *
586  * Check if the device's runtime PM status allows it to be resumed.  Cancel
587  * any scheduled or pending requests.  If another resume has been started
588  * earlier, either return immediately or wait for it to finish, depending on the
589  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
590  * parallel with this function, either tell the other process to resume after
591  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
592  * flag is set then queue a resume request; otherwise run the
593  * ->runtime_resume() callback directly.  Queue an idle notification for the
594  * device if the resume succeeded.
595  *
596  * This function must be called under dev->power.lock with interrupts disabled.
597  */
598 static int rpm_resume(struct device *dev, int rpmflags)
599         __releases(&dev->power.lock) __acquires(&dev->power.lock)
600 {
601         int (*callback)(struct device *);
602         struct device *parent = NULL;
603         int retval = 0;
604
605         trace_rpm_resume_rcuidle(dev, rpmflags);
606
607  repeat:
608         if (dev->power.runtime_error)
609                 retval = -EINVAL;
610         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
611             && dev->power.runtime_status == RPM_ACTIVE)
612                 retval = 1;
613         else if (dev->power.disable_depth > 0)
614                 retval = -EACCES;
615         if (retval)
616                 goto out;
617
618         /*
619          * Other scheduled or pending requests need to be canceled.  Small
620          * optimization: If an autosuspend timer is running, leave it running
621          * rather than cancelling it now only to restart it again in the near
622          * future.
623          */
624         dev->power.request = RPM_REQ_NONE;
625         if (!dev->power.timer_autosuspends)
626                 pm_runtime_deactivate_timer(dev);
627
628         if (dev->power.runtime_status == RPM_ACTIVE) {
629                 retval = 1;
630                 goto out;
631         }
632
633         if (dev->power.runtime_status == RPM_RESUMING
634             || dev->power.runtime_status == RPM_SUSPENDING) {
635                 DEFINE_WAIT(wait);
636
637                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
638                         if (dev->power.runtime_status == RPM_SUSPENDING)
639                                 dev->power.deferred_resume = true;
640                         else
641                                 retval = -EINPROGRESS;
642                         goto out;
643                 }
644
645                 if (dev->power.irq_safe) {
646                         spin_unlock(&dev->power.lock);
647
648                         cpu_relax();
649
650                         spin_lock(&dev->power.lock);
651                         goto repeat;
652                 }
653
654                 /* Wait for the operation carried out in parallel with us. */
655                 for (;;) {
656                         prepare_to_wait(&dev->power.wait_queue, &wait,
657                                         TASK_UNINTERRUPTIBLE);
658                         if (dev->power.runtime_status != RPM_RESUMING
659                             && dev->power.runtime_status != RPM_SUSPENDING)
660                                 break;
661
662                         spin_unlock_irq(&dev->power.lock);
663
664                         schedule();
665
666                         spin_lock_irq(&dev->power.lock);
667                 }
668                 finish_wait(&dev->power.wait_queue, &wait);
669                 goto repeat;
670         }
671
672         /*
673          * See if we can skip waking up the parent.  This is safe only if
674          * power.no_callbacks is set, because otherwise we don't know whether
675          * the resume will actually succeed.
676          */
677         if (dev->power.no_callbacks && !parent && dev->parent) {
678                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
679                 if (dev->parent->power.disable_depth > 0
680                     || dev->parent->power.ignore_children
681                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
682                         atomic_inc(&dev->parent->power.child_count);
683                         spin_unlock(&dev->parent->power.lock);
684                         retval = 1;
685                         goto no_callback;       /* Assume success. */
686                 }
687                 spin_unlock(&dev->parent->power.lock);
688         }
689
690         /* Carry out an asynchronous or a synchronous resume. */
691         if (rpmflags & RPM_ASYNC) {
692                 dev->power.request = RPM_REQ_RESUME;
693                 if (!dev->power.request_pending) {
694                         dev->power.request_pending = true;
695                         queue_work(pm_wq, &dev->power.work);
696                 }
697                 retval = 0;
698                 goto out;
699         }
700
701         if (!parent && dev->parent) {
702                 /*
703                  * Increment the parent's usage counter and resume it if
704                  * necessary.  Not needed if dev is irq-safe; then the
705                  * parent is permanently resumed.
706                  */
707                 parent = dev->parent;
708                 if (dev->power.irq_safe)
709                         goto skip_parent;
710                 spin_unlock(&dev->power.lock);
711
712                 pm_runtime_get_noresume(parent);
713
714                 spin_lock(&parent->power.lock);
715                 /*
716                  * Resume the parent if it has runtime PM enabled and has not
717                  * been set to ignore its children.
718                  */
719                 if (!parent->power.disable_depth
720                     && !parent->power.ignore_children) {
721                         rpm_resume(parent, 0);
722                         if (parent->power.runtime_status != RPM_ACTIVE)
723                                 retval = -EBUSY;
724                 }
725                 spin_unlock(&parent->power.lock);
726
727                 spin_lock(&dev->power.lock);
728                 if (retval)
729                         goto out;
730                 goto repeat;
731         }
732  skip_parent:
733
734         if (dev->power.no_callbacks)
735                 goto no_callback;       /* Assume success. */
736
737         __update_runtime_status(dev, RPM_RESUMING);
738
739         callback = RPM_GET_CALLBACK(dev, runtime_resume);
740
741         dev_pm_disable_wake_irq(dev);
742         retval = rpm_callback(callback, dev);
743         if (retval) {
744                 __update_runtime_status(dev, RPM_SUSPENDED);
745                 pm_runtime_cancel_pending(dev);
746                 dev_pm_enable_wake_irq(dev);
747         } else {
748  no_callback:
749                 __update_runtime_status(dev, RPM_ACTIVE);
750                 pm_runtime_mark_last_busy(dev);
751                 if (parent)
752                         atomic_inc(&parent->power.child_count);
753         }
754         wake_up_all(&dev->power.wait_queue);
755
756         if (retval >= 0)
757                 rpm_idle(dev, RPM_ASYNC);
758
759  out:
760         if (parent && !dev->power.irq_safe) {
761                 spin_unlock_irq(&dev->power.lock);
762
763                 pm_runtime_put(parent);
764
765                 spin_lock_irq(&dev->power.lock);
766         }
767
768         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
769
770         return retval;
771 }
772
773 /**
774  * pm_runtime_work - Universal runtime PM work function.
775  * @work: Work structure used for scheduling the execution of this function.
776  *
777  * Use @work to get the device object the work is to be done for, determine what
778  * is to be done and execute the appropriate runtime PM function.
779  */
780 static void pm_runtime_work(struct work_struct *work)
781 {
782         struct device *dev = container_of(work, struct device, power.work);
783         enum rpm_request req;
784
785         spin_lock_irq(&dev->power.lock);
786
787         if (!dev->power.request_pending)
788                 goto out;
789
790         req = dev->power.request;
791         dev->power.request = RPM_REQ_NONE;
792         dev->power.request_pending = false;
793
794         switch (req) {
795         case RPM_REQ_NONE:
796                 break;
797         case RPM_REQ_IDLE:
798                 rpm_idle(dev, RPM_NOWAIT);
799                 break;
800         case RPM_REQ_SUSPEND:
801                 rpm_suspend(dev, RPM_NOWAIT);
802                 break;
803         case RPM_REQ_AUTOSUSPEND:
804                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
805                 break;
806         case RPM_REQ_RESUME:
807                 rpm_resume(dev, RPM_NOWAIT);
808                 break;
809         }
810
811  out:
812         spin_unlock_irq(&dev->power.lock);
813 }
814
815 /**
816  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
817  * @data: Device pointer passed by pm_schedule_suspend().
818  *
819  * Check if the time is right and queue a suspend request.
820  */
821 static void pm_suspend_timer_fn(unsigned long data)
822 {
823         struct device *dev = (struct device *)data;
824         unsigned long flags;
825         unsigned long expires;
826
827         spin_lock_irqsave(&dev->power.lock, flags);
828
829         expires = dev->power.timer_expires;
830         /* If 'expires' is after 'jiffies' we've been called too early. */
831         if (expires > 0 && !time_after(expires, jiffies)) {
832                 dev->power.timer_expires = 0;
833                 rpm_suspend(dev, dev->power.timer_autosuspends ?
834                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
835         }
836
837         spin_unlock_irqrestore(&dev->power.lock, flags);
838 }
839
840 /**
841  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
842  * @dev: Device to suspend.
843  * @delay: Time to wait before submitting a suspend request, in milliseconds.
844  */
845 int pm_schedule_suspend(struct device *dev, unsigned int delay)
846 {
847         unsigned long flags;
848         int retval;
849
850         spin_lock_irqsave(&dev->power.lock, flags);
851
852         if (!delay) {
853                 retval = rpm_suspend(dev, RPM_ASYNC);
854                 goto out;
855         }
856
857         retval = rpm_check_suspend_allowed(dev);
858         if (retval)
859                 goto out;
860
861         /* Other scheduled or pending requests need to be canceled. */
862         pm_runtime_cancel_pending(dev);
863
864         dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
865         dev->power.timer_expires += !dev->power.timer_expires;
866         dev->power.timer_autosuspends = 0;
867         mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
868
869  out:
870         spin_unlock_irqrestore(&dev->power.lock, flags);
871
872         return retval;
873 }
874 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
875
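/*
 * Illustrative sketch, not part of this file: instead of suspending
 * synchronously, a driver that expects its hardware to stay idle for a
 * while can schedule a delayed suspend request.  The 500 ms delay and
 * the foo pointer are hypothetical.
 *
 *	pm_schedule_suspend(foo->dev, 500);
 *
 * This submits an asynchronous suspend request roughly 500 ms from now;
 * rpm_check_suspend_allowed() is consulted first, so a device that is
 * still in use (usage_count > 0) gets -EAGAIN instead of arming the timer.
 */
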
876 /**
877  * __pm_runtime_idle - Entry point for runtime idle operations.
878  * @dev: Device to send idle notification for.
879  * @rpmflags: Flag bits.
880  *
881  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
882  * return immediately if it is larger than zero.  Then carry out an idle
883  * notification, either synchronous or asynchronous.
884  *
885  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
886  * or if pm_runtime_irq_safe() has been called.
887  */
888 int __pm_runtime_idle(struct device *dev, int rpmflags)
889 {
890         unsigned long flags;
891         int retval;
892
893         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
894
895         if (rpmflags & RPM_GET_PUT) {
896                 if (!atomic_dec_and_test(&dev->power.usage_count))
897                         return 0;
898         }
899
900         spin_lock_irqsave(&dev->power.lock, flags);
901         retval = rpm_idle(dev, rpmflags);
902         spin_unlock_irqrestore(&dev->power.lock, flags);
903
904         return retval;
905 }
906 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
907
908 /**
909  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
910  * @dev: Device to suspend.
911  * @rpmflags: Flag bits.
912  *
913  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
914  * return immediately if it is larger than zero.  Then carry out a suspend,
915  * either synchronous or asynchronous.
916  *
917  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
918  * or if pm_runtime_irq_safe() has been called.
919  */
920 int __pm_runtime_suspend(struct device *dev, int rpmflags)
921 {
922         unsigned long flags;
923         int retval;
924
925         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
926
927         if (rpmflags & RPM_GET_PUT) {
928                 if (!atomic_dec_and_test(&dev->power.usage_count))
929                         return 0;
930         }
931
932         spin_lock_irqsave(&dev->power.lock, flags);
933         retval = rpm_suspend(dev, rpmflags);
934         spin_unlock_irqrestore(&dev->power.lock, flags);
935
936         return retval;
937 }
938 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
939
940 /**
941  * __pm_runtime_resume - Entry point for runtime resume operations.
942  * @dev: Device to resume.
943  * @rpmflags: Flag bits.
944  *
945  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
946  * carry out a resume, either synchronous or asynchronous.
947  *
948  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
949  * or if pm_runtime_irq_safe() has been called.
950  */
951 int __pm_runtime_resume(struct device *dev, int rpmflags)
952 {
953         unsigned long flags;
954         int retval;
955
956         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
957
958         if (rpmflags & RPM_GET_PUT)
959                 atomic_inc(&dev->power.usage_count);
960
961         spin_lock_irqsave(&dev->power.lock, flags);
962         retval = rpm_resume(dev, rpmflags);
963         spin_unlock_irqrestore(&dev->power.lock, flags);
964
965         return retval;
966 }
967 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
968
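/*
 * Illustrative sketch, not part of this file: the usual driver-side
 * pattern that reaches __pm_runtime_resume() and __pm_runtime_idle()
 * through the pm_runtime_get_sync()/pm_runtime_put() wrappers from
 * pm_runtime.h.  The hardware access in the middle is hypothetical.
 *
 *	ret = pm_runtime_get_sync(dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);
 *		return ret;
 *	}
 *	... access the hardware ...
 *	pm_runtime_put(dev);
 */
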
969 /**
970  * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
971  * @dev: Device to handle.
972  *
973  * Return -EINVAL if runtime PM is disabled for the device.
974  *
975  * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
976  * and the runtime PM usage counter is nonzero, increment the counter and
977  * return 1.  Otherwise return 0 without changing the counter.
978  */
979 int pm_runtime_get_if_in_use(struct device *dev)
980 {
981         unsigned long flags;
982         int retval;
983
984         spin_lock_irqsave(&dev->power.lock, flags);
985         retval = dev->power.disable_depth > 0 ? -EINVAL :
986                 dev->power.runtime_status == RPM_ACTIVE
987                         && atomic_inc_not_zero(&dev->power.usage_count);
988         spin_unlock_irqrestore(&dev->power.lock, flags);
989         return retval;
990 }
991 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
992
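/*
 * Illustrative sketch, not part of this file: pm_runtime_get_if_in_use()
 * lets a hot path touch the hardware only when it is already active,
 * without waiting for (or triggering) a resume.  The register read is
 * hypothetical.
 *
 *	if (pm_runtime_get_if_in_use(dev) <= 0)
 *		return;
 *	... read a few registers ...
 *	pm_runtime_put(dev);
 *
 * A return value of 0 means the device was not in use (no counter bump)
 * and -EINVAL means runtime PM is disabled; only a return of 1 must be
 * balanced with a put.
 */
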
993 /**
994  * __pm_runtime_set_status - Set runtime PM status of a device.
995  * @dev: Device to handle.
996  * @status: New runtime PM status of the device.
997  *
998  * If runtime PM of the device is disabled or its power.runtime_error field is
999  * different from zero, the status may be changed either to RPM_ACTIVE, or to
1000  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1001  * However, if the device has a parent and the parent is not active, and the
1002  * parent's power.ignore_children flag is unset, the device's status cannot be
1003  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1004  *
1005  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1006  * and the device parent's counter of unsuspended children is modified to
1007  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1008  * notification request for the parent is submitted.
1009  */
1010 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1011 {
1012         struct device *parent = dev->parent;
1013         unsigned long flags;
1014         bool notify_parent = false;
1015         int error = 0;
1016
1017         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1018                 return -EINVAL;
1019
1020         spin_lock_irqsave(&dev->power.lock, flags);
1021
1022         if (!dev->power.runtime_error && !dev->power.disable_depth) {
1023                 error = -EAGAIN;
1024                 goto out;
1025         }
1026
1027         if (dev->power.runtime_status == status)
1028                 goto out_set;
1029
1030         if (status == RPM_SUSPENDED) {
1031                 /*
1032                  * It is invalid to suspend a device with an active child,
1033                  * unless it has been set to ignore its children.
1034                  */
1035                 if (!dev->power.ignore_children &&
1036                         atomic_read(&dev->power.child_count)) {
1037                         dev_err(dev, "runtime PM trying to suspend device but active child\n");
1038                         error = -EBUSY;
1039                         goto out;
1040                 }
1041
1042                 if (parent) {
1043                         atomic_add_unless(&parent->power.child_count, -1, 0);
1044                         notify_parent = !parent->power.ignore_children;
1045                 }
1046                 goto out_set;
1047         }
1048
1049         if (parent) {
1050                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1051
1052                 /*
1053                  * It is invalid to put an active child under a parent that is
1054                  * not active, has runtime PM enabled, and has the
1055                  * 'power.ignore_children' flag unset.
1056                  */
1057                 if (!parent->power.disable_depth
1058                     && !parent->power.ignore_children
1059                     && parent->power.runtime_status != RPM_ACTIVE) {
1060                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1061                                 dev_name(dev),
1062                                 dev_name(parent));
1063                         error = -EBUSY;
1064                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1065                         atomic_inc(&parent->power.child_count);
1066                 }
1067
1068                 spin_unlock(&parent->power.lock);
1069
1070                 if (error)
1071                         goto out;
1072         }
1073
1074  out_set:
1075         __update_runtime_status(dev, status);
1076         dev->power.runtime_error = 0;
1077  out:
1078         spin_unlock_irqrestore(&dev->power.lock, flags);
1079
1080         if (notify_parent)
1081                 pm_request_idle(parent);
1082
1083         return error;
1084 }
1085 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1086
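/*
 * Illustrative sketch, not part of this file: a probe routine normally
 * declares the device's initial state with pm_runtime_set_active() or
 * pm_runtime_set_suspended() (both wrappers around
 * __pm_runtime_set_status()) before enabling runtime PM.  The power-up
 * helper is hypothetical.
 *
 *	foo_power_up(dev);		hypothetical: hardware is now on
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */
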
1087 /**
1088  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1089  * @dev: Device to handle.
1090  *
1091  * Flush all pending requests for the device from pm_wq and wait for all
1092  * runtime PM operations involving the device in progress to complete.
1093  *
1094  * Should be called under dev->power.lock with interrupts disabled.
1095  */
1096 static void __pm_runtime_barrier(struct device *dev)
1097 {
1098         pm_runtime_deactivate_timer(dev);
1099
1100         if (dev->power.request_pending) {
1101                 dev->power.request = RPM_REQ_NONE;
1102                 spin_unlock_irq(&dev->power.lock);
1103
1104                 cancel_work_sync(&dev->power.work);
1105
1106                 spin_lock_irq(&dev->power.lock);
1107                 dev->power.request_pending = false;
1108         }
1109
1110         if (dev->power.runtime_status == RPM_SUSPENDING
1111             || dev->power.runtime_status == RPM_RESUMING
1112             || dev->power.idle_notification) {
1113                 DEFINE_WAIT(wait);
1114
1115                 /* Suspend, wake-up or idle notification in progress. */
1116                 for (;;) {
1117                         prepare_to_wait(&dev->power.wait_queue, &wait,
1118                                         TASK_UNINTERRUPTIBLE);
1119                         if (dev->power.runtime_status != RPM_SUSPENDING
1120                             && dev->power.runtime_status != RPM_RESUMING
1121                             && !dev->power.idle_notification)
1122                                 break;
1123                         spin_unlock_irq(&dev->power.lock);
1124
1125                         schedule();
1126
1127                         spin_lock_irq(&dev->power.lock);
1128                 }
1129                 finish_wait(&dev->power.wait_queue, &wait);
1130         }
1131 }
1132
1133 /**
1134  * pm_runtime_barrier - Flush pending requests and wait for completions.
1135  * @dev: Device to handle.
1136  *
1137  * Prevent the device from being suspended by incrementing its usage counter
1138  * and, if there's a pending resume request for the device, wake the device up.
1139  * Next, make sure that all pending requests for the device have been flushed
1140  * from pm_wq and wait for all runtime PM operations involving the device in
1141  * progress to complete.
1142  *
1143  * Return value:
1144  * 1, if there was a resume request pending and the device had to be woken up,
1145  * 0, otherwise
1146  */
1147 int pm_runtime_barrier(struct device *dev)
1148 {
1149         int retval = 0;
1150
1151         pm_runtime_get_noresume(dev);
1152         spin_lock_irq(&dev->power.lock);
1153
1154         if (dev->power.request_pending
1155             && dev->power.request == RPM_REQ_RESUME) {
1156                 rpm_resume(dev, 0);
1157                 retval = 1;
1158         }
1159
1160         __pm_runtime_barrier(dev);
1161
1162         spin_unlock_irq(&dev->power.lock);
1163         pm_runtime_put_noidle(dev);
1164
1165         return retval;
1166 }
1167 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1168
1169 /**
1170  * __pm_runtime_disable - Disable runtime PM of a device.
1171  * @dev: Device to handle.
1172  * @check_resume: If set, check if there's a resume request for the device.
1173  *
1174  * Increment power.disable_depth for the device and if it was zero previously,
1175  * cancel all pending runtime PM requests for the device and wait for all
1176  * operations in progress to complete.  The device can be either active or
1177  * suspended after its runtime PM has been disabled.
1178  *
1179  * If @check_resume is set and there's a resume request pending when
1180  * __pm_runtime_disable() is called and power.disable_depth is zero, the
1181  * function will wake up the device before disabling its runtime PM.
1182  */
1183 void __pm_runtime_disable(struct device *dev, bool check_resume)
1184 {
1185         spin_lock_irq(&dev->power.lock);
1186
1187         if (dev->power.disable_depth > 0) {
1188                 dev->power.disable_depth++;
1189                 goto out;
1190         }
1191
1192         /*
1193          * Wake up the device if there's a resume request pending, because that
1194          * means there probably is some I/O to process and disabling runtime PM
1195          * shouldn't prevent the device from processing the I/O.
1196          */
1197         if (check_resume && dev->power.request_pending
1198             && dev->power.request == RPM_REQ_RESUME) {
1199                 /*
1200                  * Prevent suspends and idle notifications from being carried
1201                  * out after we have woken up the device.
1202                  */
1203                 pm_runtime_get_noresume(dev);
1204
1205                 rpm_resume(dev, 0);
1206
1207                 pm_runtime_put_noidle(dev);
1208         }
1209
1210         if (!dev->power.disable_depth++)
1211                 __pm_runtime_barrier(dev);
1212
1213  out:
1214         spin_unlock_irq(&dev->power.lock);
1215 }
1216 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1217
1218 /**
1219  * pm_runtime_enable - Enable runtime PM of a device.
1220  * @dev: Device to handle.
1221  */
1222 void pm_runtime_enable(struct device *dev)
1223 {
1224         unsigned long flags;
1225
1226         spin_lock_irqsave(&dev->power.lock, flags);
1227
1228         if (dev->power.disable_depth > 0)
1229                 dev->power.disable_depth--;
1230         else
1231                 dev_warn(dev, "Unbalanced %s!\n", __func__);
1232
1233         spin_unlock_irqrestore(&dev->power.lock, flags);
1234 }
1235 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1236
1237 /**
1238  * pm_runtime_forbid - Block runtime PM of a device.
1239  * @dev: Device to handle.
1240  *
1241  * Increase the device's usage count and clear its power.runtime_auto flag,
1242  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1243  * for it.
1244  */
1245 void pm_runtime_forbid(struct device *dev)
1246 {
1247         spin_lock_irq(&dev->power.lock);
1248         if (!dev->power.runtime_auto)
1249                 goto out;
1250
1251         dev->power.runtime_auto = false;
1252         atomic_inc(&dev->power.usage_count);
1253         rpm_resume(dev, 0);
1254
1255  out:
1256         spin_unlock_irq(&dev->power.lock);
1257 }
1258 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1259
1260 /**
1261  * pm_runtime_allow - Unblock runtime PM of a device.
1262  * @dev: Device to handle.
1263  *
1264  * Decrease the device's usage count and set its power.runtime_auto flag.
1265  */
1266 void pm_runtime_allow(struct device *dev)
1267 {
1268         spin_lock_irq(&dev->power.lock);
1269         if (dev->power.runtime_auto)
1270                 goto out;
1271
1272         dev->power.runtime_auto = true;
1273         if (atomic_dec_and_test(&dev->power.usage_count))
1274                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1275
1276  out:
1277         spin_unlock_irq(&dev->power.lock);
1278 }
1279 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1280
1281 /**
1282  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1283  * @dev: Device to handle.
1284  *
1285  * Set the power.no_callbacks flag, which tells the PM core that this
1286  * device is power-managed through its parent and has no runtime PM
1287  * callbacks of its own.  The runtime sysfs attributes will be removed.
1288  */
1289 void pm_runtime_no_callbacks(struct device *dev)
1290 {
1291         spin_lock_irq(&dev->power.lock);
1292         dev->power.no_callbacks = 1;
1293         spin_unlock_irq(&dev->power.lock);
1294         if (device_is_registered(dev))
1295                 rpm_sysfs_remove(dev);
1296 }
1297 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1298
1299 /**
1300  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1301  * @dev: Device to handle
1302  *
1303  * Set the power.irq_safe flag, which tells the PM core that the
1304  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1305  * always be invoked with the spinlock held and interrupts disabled.  It also
1306  * causes the parent's usage counter to be permanently incremented, preventing
1307  * the parent from runtime suspending -- otherwise an irq-safe child might have
1308  * to wait for a non-irq-safe parent.
1309  */
1310 void pm_runtime_irq_safe(struct device *dev)
1311 {
1312         if (dev->parent)
1313                 pm_runtime_get_sync(dev->parent);
1314         spin_lock_irq(&dev->power.lock);
1315         dev->power.irq_safe = 1;
1316         spin_unlock_irq(&dev->power.lock);
1317 }
1318 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1319
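/*
 * Illustrative sketch, not part of this file: a driver whose
 * ->runtime_suspend() and ->runtime_resume() callbacks never sleep can
 * declare that once, typically in probe, after which the synchronous
 * helpers may be used from atomic context:
 *
 *	pm_runtime_irq_safe(dev);
 *
 * Note that this permanently pins the parent in the resumed state, as
 * described above, so it should be used sparingly.
 */
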
1320 /**
1321  * update_autosuspend - Handle a change to a device's autosuspend settings.
1322  * @dev: Device to handle.
1323  * @old_delay: The former autosuspend_delay value.
1324  * @old_use: The former use_autosuspend value.
1325  *
1326  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1327  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1328  *
1329  * This function must be called under dev->power.lock with interrupts disabled.
1330  */
1331 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1332 {
1333         int delay = dev->power.autosuspend_delay;
1334
1335         /* Should runtime suspend be prevented now? */
1336         if (dev->power.use_autosuspend && delay < 0) {
1337
1338                 /* If it used to be allowed then prevent it. */
1339                 if (!old_use || old_delay >= 0) {
1340                         atomic_inc(&dev->power.usage_count);
1341                         rpm_resume(dev, 0);
1342                 }
1343         }
1344
1345         /* Runtime suspend should be allowed now. */
1346         else {
1347
1348                 /* If it used to be prevented then allow it. */
1349                 if (old_use && old_delay < 0)
1350                         atomic_dec(&dev->power.usage_count);
1351
1352                 /* Maybe we can autosuspend now. */
1353                 rpm_idle(dev, RPM_AUTO);
1354         }
1355 }
1356
1357 /**
1358  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1359  * @dev: Device to handle.
1360  * @delay: Value of the new delay in milliseconds.
1361  *
1362  * Set the device's power.autosuspend_delay value.  If it changes to negative
1363  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1364  * changes the other way, allow runtime suspends.
1365  */
1366 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1367 {
1368         int old_delay, old_use;
1369
1370         spin_lock_irq(&dev->power.lock);
1371         old_delay = dev->power.autosuspend_delay;
1372         old_use = dev->power.use_autosuspend;
1373         dev->power.autosuspend_delay = delay;
1374         update_autosuspend(dev, old_delay, old_use);
1375         spin_unlock_irq(&dev->power.lock);
1376 }
1377 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1378
1379 /**
1380  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1381  * @dev: Device to handle.
1382  * @use: New value for use_autosuspend.
1383  *
1384  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1385  * suspends as needed.
1386  */
1387 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1388 {
1389         int old_delay, old_use;
1390
1391         spin_lock_irq(&dev->power.lock);
1392         old_delay = dev->power.autosuspend_delay;
1393         old_use = dev->power.use_autosuspend;
1394         dev->power.use_autosuspend = use;
1395         update_autosuspend(dev, old_delay, old_use);
1396         spin_unlock_irq(&dev->power.lock);
1397 }
1398 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1399
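/*
 * Illustrative sketch, not part of this file: the usual autosuspend
 * setup and I/O pattern built on the two helpers above.  The 2000 ms
 * delay is hypothetical; the pm_runtime_* calls are the real driver API.
 *
 *	During probe:
 *		pm_runtime_set_autosuspend_delay(dev, 2000);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_enable(dev);
 *
 *	After each burst of I/O:
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 */
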
1400 /**
1401  * pm_runtime_init - Initialize runtime PM fields in given device object.
1402  * @dev: Device object to initialize.
1403  */
1404 void pm_runtime_init(struct device *dev)
1405 {
1406         dev->power.runtime_status = RPM_SUSPENDED;
1407         dev->power.idle_notification = false;
1408
1409         dev->power.disable_depth = 1;
1410         atomic_set(&dev->power.usage_count, 0);
1411
1412         dev->power.runtime_error = 0;
1413
1414         atomic_set(&dev->power.child_count, 0);
1415         pm_suspend_ignore_children(dev, false);
1416         dev->power.runtime_auto = true;
1417
1418         dev->power.request_pending = false;
1419         dev->power.request = RPM_REQ_NONE;
1420         dev->power.deferred_resume = false;
1421         dev->power.accounting_timestamp = jiffies;
1422         INIT_WORK(&dev->power.work, pm_runtime_work);
1423
1424         dev->power.timer_expires = 0;
1425         setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
1426                         (unsigned long)dev);
1427
1428         init_waitqueue_head(&dev->power.wait_queue);
1429 }
1430
1431 /**
1432  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1433  * @dev: Device object to re-initialize.
1434  */
1435 void pm_runtime_reinit(struct device *dev)
1436 {
1437         if (!pm_runtime_enabled(dev)) {
1438                 if (dev->power.runtime_status == RPM_ACTIVE)
1439                         pm_runtime_set_suspended(dev);
1440                 if (dev->power.irq_safe) {
1441                         spin_lock_irq(&dev->power.lock);
1442                         dev->power.irq_safe = 0;
1443                         spin_unlock_irq(&dev->power.lock);
1444                         if (dev->parent)
1445                                 pm_runtime_put(dev->parent);
1446                 }
1447         }
1448 }
1449
1450 /**
1451  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1452  * @dev: Device object being removed from device hierarchy.
1453  */
1454 void pm_runtime_remove(struct device *dev)
1455 {
1456         __pm_runtime_disable(dev, false);
1457         pm_runtime_reinit(dev);
1458 }
1459
1460 /**
1461  * pm_runtime_force_suspend - Force a device into suspend state if needed.
1462  * @dev: Device to suspend.
1463  *
1464  * Disable runtime PM so we can safely check the device's runtime PM status
1465  * and, if it is active, invoke its ->runtime_suspend() callback to bring it
1466  * into suspend state. Keep runtime PM disabled to preserve the state unless we
1467  * encounter errors.
1468  *
1469  * Typically this function may be invoked from a system suspend callback to make
1470  * sure the device is put into low power state.
1471  */
1472 int pm_runtime_force_suspend(struct device *dev)
1473 {
1474         int (*callback)(struct device *);
1475         int ret = 0;
1476
1477         pm_runtime_disable(dev);
1478         if (pm_runtime_status_suspended(dev))
1479                 return 0;
1480
1481         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1482
1483         if (!callback) {
1484                 ret = -ENOSYS;
1485                 goto err;
1486         }
1487
1488         ret = callback(dev);
1489         if (ret)
1490                 goto err;
1491
1492         /*
1493          * Increase the runtime PM usage count for the device's parent, in case
1494          * we find the device in use when system suspend was invoked.
1495          * This informs pm_runtime_force_resume() to resume the parent
1496          * immediately, which is needed to be able to resume its children,
1497          * when not deferring the resume to be managed via runtime PM.
1498          */
1499         if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
1500                 pm_runtime_get_noresume(dev->parent);
1501
1502         pm_runtime_set_suspended(dev);
1503         return 0;
1504 err:
1505         pm_runtime_enable(dev);
1506         return ret;
1507 }
1508 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1509
1510 /**
1511  * pm_runtime_force_resume - Force a device into resume state if needed.
1512  * @dev: Device to resume.
1513  *
1514  * Prior to invoking this function we expect the user to have brought the device
1515  * into a low power state by a call to pm_runtime_force_suspend(). Here we reverse
1516  * those actions and bring the device back to full power, if it is expected to be
1517  * used on system resume. To distinguish that, we check whether the runtime PM
1518  * usage count is greater than 1 (the PM core increases the usage count in the
1519  * system PM prepare phase), as that indicates a real user (such as a subsystem,
1520  * driver, userspace, etc.) is using it. If that is the case, the device is
1521  * expected to be used on system resume as well, so then we resume it. In the
1522  * other case, we defer the resume to be managed via runtime PM.
1523  *
1524  * Typically this function may be invoked from a system resume callback.
1525  */
1526 int pm_runtime_force_resume(struct device *dev)
1527 {
1528         int (*callback)(struct device *);
1529         int ret = 0;
1530
1531         callback = RPM_GET_CALLBACK(dev, runtime_resume);
1532
1533         if (!callback) {
1534                 ret = -ENOSYS;
1535                 goto out;
1536         }
1537
1538         if (!pm_runtime_status_suspended(dev))
1539                 goto out;
1540
1541         /*
1542          * Decrease the parent's runtime PM usage count, if we increased it
1543          * during system suspend in pm_runtime_force_suspend().
1544          */
1545         if (atomic_read(&dev->power.usage_count) > 1) {
1546                 if (dev->parent)
1547                         pm_runtime_put_noidle(dev->parent);
1548         } else {
1549                 goto out;
1550         }
1551
1552         ret = pm_runtime_set_active(dev);
1553         if (ret)
1554                 goto out;
1555
1556         ret = callback(dev);
1557         if (ret) {
1558                 pm_runtime_set_suspended(dev);
1559                 goto out;
1560         }
1561
1562         pm_runtime_mark_last_busy(dev);
1563 out:
1564         pm_runtime_enable(dev);
1565         return ret;
1566 }
1567 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
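
/*
 * Illustrative sketch, not part of this file: a driver with no special
 * system sleep requirements may reuse the two helpers above as its
 * system PM callbacks.  foo_pm_ops and the foo_runtime_* callbacks are
 * hypothetical names.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */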