PM / Runtime: Merge synchronous and async runtime routines
[linux-2.6-block.git] / drivers / base / power / runtime.c
CommitLineData
5e928f77
RW
1/*
2 * drivers/base/power/runtime.c - Helper functions for device run-time PM
3 *
4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
1bfee5bc 5 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
5e928f77
RW
6 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/sched.h>
11#include <linux/pm_runtime.h>
12#include <linux/jiffies.h>
13
3f9af051 14static int __pm_runtime_resume(struct device *dev, int rpmflags);
5e928f77 15
4769373c
AS
16/**
17 * update_pm_runtime_accounting - Update the time accounting of power states
18 * @dev: Device to update the accounting for
19 *
20 * In order to be able to have time accounting of the various power states
21 * (as used by programs such as PowerTOP to show the effectiveness of runtime
22 * PM), we need to track the time spent in each state.
23 * update_pm_runtime_accounting must be called each time before the
24 * runtime_status field is updated, to account the time in the old state
25 * correctly.
26 */
27void update_pm_runtime_accounting(struct device *dev)
28{
29 unsigned long now = jiffies;
30 int delta;
31
32 delta = now - dev->power.accounting_timestamp;
33
34 if (delta < 0)
35 delta = 0;
36
37 dev->power.accounting_timestamp = now;
38
39 if (dev->power.disable_depth > 0)
40 return;
41
42 if (dev->power.runtime_status == RPM_SUSPENDED)
43 dev->power.suspended_jiffies += delta;
44 else
45 dev->power.active_jiffies += delta;
46}
47
/* Change the runtime PM status, accounting the time spent in the old state. */
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
53
5e928f77
RW
54/**
55 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
56 * @dev: Device to handle.
57 */
58static void pm_runtime_deactivate_timer(struct device *dev)
59{
60 if (dev->power.timer_expires > 0) {
61 del_timer(&dev->power.suspend_timer);
62 dev->power.timer_expires = 0;
63 }
64}
65
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
79
80/**
1bfee5bc
AS
81 * rpm_check_suspend_allowed - Test whether a device may be suspended.
82 * @dev: Device to test.
5e928f77 83 */
1bfee5bc 84static int rpm_check_suspend_allowed(struct device *dev)
5e928f77
RW
85{
86 int retval = 0;
87
5e928f77
RW
88 if (dev->power.runtime_error)
89 retval = -EINVAL;
5e928f77 90 else if (atomic_read(&dev->power.usage_count) > 0
1bfee5bc 91 || dev->power.disable_depth > 0)
5e928f77
RW
92 retval = -EAGAIN;
93 else if (!pm_children_suspended(dev))
94 retval = -EBUSY;
1bfee5bc
AS
95
96 /* Pending resume requests take precedence over suspends. */
97 else if ((dev->power.deferred_resume
98 && dev->power.status == RPM_SUSPENDING)
99 || (dev->power.request_pending
100 && dev->power.request == RPM_REQ_RESUME))
101 retval = -EAGAIN;
102 else if (dev->power.runtime_status == RPM_SUSPENDED)
103 retval = 1;
104
105 return retval;
106}
107
108
/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	/*
	 * The callback runs with the lock dropped so it may sleep or take
	 * dev->power.lock itself; bus callbacks take precedence over type,
	 * which takes precedence over class.
	 */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->type->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->class->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
190
191/**
192 * pm_runtime_idle - Notify device bus type if the device can be suspended.
193 * @dev: Device to notify the bus type about.
194 */
195int pm_runtime_idle(struct device *dev)
196{
197 int retval;
198
199 spin_lock_irq(&dev->power.lock);
1bfee5bc 200 retval = __pm_runtime_idle(dev, 0);
5e928f77
RW
201 spin_unlock_irq(&dev->power.lock);
202
203 return retval;
204}
205EXPORT_SYMBOL_GPL(pm_runtime_idle);
206
207/**
208 * __pm_runtime_suspend - Carry out run-time suspend of given device.
209 * @dev: Device to suspend.
3f9af051 210 * @rpmflags: Flag bits.
5e928f77 211 *
1bfee5bc
AS
212 * Check if the device's run-time PM status allows it to be suspended. If
213 * another suspend has been started earlier, either return immediately or wait
214 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
215 * pending idle notification. If the RPM_ASYNC flag is set then queue a
216 * suspend request; otherwise run the ->runtime_suspend() callback directly.
217 * If a deferred resume was requested while the callback was running then carry
218 * it out; otherwise send an idle notification for the device (if the suspend
219 * failed) or for its parent (if the suspend succeeded).
5e928f77
RW
220 *
221 * This function must be called under dev->power.lock with interrupts disabled.
222 */
3f9af051 223static int __pm_runtime_suspend(struct device *dev, int rpmflags)
5e928f77
RW
224 __releases(&dev->power.lock) __acquires(&dev->power.lock)
225{
226 struct device *parent = NULL;
227 bool notify = false;
1bfee5bc 228 int retval;
5e928f77 229
3f9af051 230 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
5e928f77
RW
231
232 repeat:
1bfee5bc 233 retval = rpm_check_suspend_allowed(dev);
5e928f77 234
1bfee5bc
AS
235 if (retval < 0)
236 ; /* Conditions are wrong. */
237
238 /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
239 else if (dev->power.runtime_status == RPM_RESUMING &&
240 !(rpmflags & RPM_ASYNC))
5e928f77 241 retval = -EAGAIN;
1bfee5bc 242 if (retval)
5e928f77 243 goto out;
5e928f77
RW
244
245 /* Other scheduled or pending requests need to be canceled. */
246 pm_runtime_cancel_pending(dev);
247
5e928f77
RW
248 if (dev->power.runtime_status == RPM_SUSPENDING) {
249 DEFINE_WAIT(wait);
250
1bfee5bc 251 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
5e928f77
RW
252 retval = -EINPROGRESS;
253 goto out;
254 }
255
256 /* Wait for the other suspend running in parallel with us. */
257 for (;;) {
258 prepare_to_wait(&dev->power.wait_queue, &wait,
259 TASK_UNINTERRUPTIBLE);
260 if (dev->power.runtime_status != RPM_SUSPENDING)
261 break;
262
263 spin_unlock_irq(&dev->power.lock);
264
265 schedule();
266
267 spin_lock_irq(&dev->power.lock);
268 }
269 finish_wait(&dev->power.wait_queue, &wait);
270 goto repeat;
271 }
272
1bfee5bc
AS
273 /* Carry out an asynchronous or a synchronous suspend. */
274 if (rpmflags & RPM_ASYNC) {
275 dev->power.request = RPM_REQ_SUSPEND;
276 if (!dev->power.request_pending) {
277 dev->power.request_pending = true;
278 queue_work(pm_wq, &dev->power.work);
279 }
280 goto out;
281 }
282
8d4b9d1b 283 __update_runtime_status(dev, RPM_SUSPENDING);
63c94801 284 dev->power.deferred_resume = false;
5e928f77
RW
285
286 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
287 spin_unlock_irq(&dev->power.lock);
288
289 retval = dev->bus->pm->runtime_suspend(dev);
290
a6ab7aa9
RW
291 spin_lock_irq(&dev->power.lock);
292 dev->power.runtime_error = retval;
293 } else if (dev->type && dev->type->pm
294 && dev->type->pm->runtime_suspend) {
295 spin_unlock_irq(&dev->power.lock);
296
297 retval = dev->type->pm->runtime_suspend(dev);
298
299 spin_lock_irq(&dev->power.lock);
300 dev->power.runtime_error = retval;
301 } else if (dev->class && dev->class->pm
302 && dev->class->pm->runtime_suspend) {
303 spin_unlock_irq(&dev->power.lock);
304
305 retval = dev->class->pm->runtime_suspend(dev);
306
5e928f77
RW
307 spin_lock_irq(&dev->power.lock);
308 dev->power.runtime_error = retval;
309 } else {
310 retval = -ENOSYS;
311 }
312
313 if (retval) {
8d4b9d1b 314 __update_runtime_status(dev, RPM_ACTIVE);
1bfee5bc 315 dev->power.deferred_resume = 0;
5e928f77 316 if (retval == -EAGAIN || retval == -EBUSY) {
240c7337
AS
317 if (dev->power.timer_expires == 0)
318 notify = true;
5e928f77 319 dev->power.runtime_error = 0;
240c7337
AS
320 } else {
321 pm_runtime_cancel_pending(dev);
5e928f77
RW
322 }
323 } else {
8d4b9d1b 324 __update_runtime_status(dev, RPM_SUSPENDED);
240c7337 325 pm_runtime_deactivate_timer(dev);
5e928f77
RW
326
327 if (dev->parent) {
328 parent = dev->parent;
329 atomic_add_unless(&parent->power.child_count, -1, 0);
330 }
331 }
332 wake_up_all(&dev->power.wait_queue);
333
334 if (dev->power.deferred_resume) {
3f9af051 335 __pm_runtime_resume(dev, 0);
5e928f77
RW
336 retval = -EAGAIN;
337 goto out;
338 }
339
340 if (notify)
1bfee5bc 341 __pm_runtime_idle(dev, 0);
5e928f77
RW
342
343 if (parent && !parent->power.ignore_children) {
344 spin_unlock_irq(&dev->power.lock);
345
346 pm_request_idle(parent);
347
348 spin_lock_irq(&dev->power.lock);
349 }
350
351 out:
3f9af051 352 dev_dbg(dev, "%s returns %d\n", __func__, retval);
5e928f77
RW
353
354 return retval;
355}
356
357/**
358 * pm_runtime_suspend - Carry out run-time suspend of given device.
359 * @dev: Device to suspend.
360 */
361int pm_runtime_suspend(struct device *dev)
362{
363 int retval;
364
365 spin_lock_irq(&dev->power.lock);
3f9af051 366 retval = __pm_runtime_suspend(dev, 0);
5e928f77
RW
367 spin_unlock_irq(&dev->power.lock);
368
369 return retval;
370}
371EXPORT_SYMBOL_GPL(pm_runtime_suspend);
372
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	/* Nothing to do if the device is already active. */
	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			/*
			 * Ask the suspend in progress to resume the device
			 * when it has finished, instead of waiting here.
			 */
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		/* Re-check all the conditions now that the parent is active. */
		goto repeat;
	}

	__update_runtime_status(dev, RPM_RESUMING);

	/*
	 * The callback runs with the lock dropped; bus callbacks take
	 * precedence over type, which takes precedence over class.
	 */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_runtime_idle(dev, RPM_ASYNC);

 out:
	if (parent) {
		/* Drop the reference taken with pm_runtime_get_noresume(). */
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}
539
540/**
541 * pm_runtime_resume - Carry out run-time resume of given device.
542 * @dev: Device to suspend.
543 */
544int pm_runtime_resume(struct device *dev)
545{
546 int retval;
547
548 spin_lock_irq(&dev->power.lock);
3f9af051 549 retval = __pm_runtime_resume(dev, 0);
5e928f77
RW
550 spin_unlock_irq(&dev->power.lock);
551
552 return retval;
553}
554EXPORT_SYMBOL_GPL(pm_runtime_resume);
555
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	/* The request may have been canceled since the work was queued. */
	if (!dev->power.request_pending)
		goto out;

	/* Consume the request before carrying it out. */
	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
594
5e928f77
RW
595/**
596 * pm_request_idle - Submit an idle notification request for given device.
597 * @dev: Device to handle.
598 */
599int pm_request_idle(struct device *dev)
600{
601 unsigned long flags;
602 int retval;
603
604 spin_lock_irqsave(&dev->power.lock, flags);
1bfee5bc 605 retval = __pm_runtime_idle(dev, RPM_ASYNC);
5e928f77
RW
606 spin_unlock_irqrestore(&dev->power.lock, flags);
607
608 return retval;
609}
610EXPORT_SYMBOL_GPL(pm_request_idle);
611
5e928f77
RW
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		/* Mark the timer inactive before queuing the suspend. */
		dev->power.timer_expires = 0;
		__pm_runtime_suspend(dev, RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
635
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	/* A zero delay means "suspend as soon as possible". */
	if (!delay) {
		retval = __pm_runtime_suspend(dev, RPM_ASYNC);
		goto out;
	}

	/* Any nonzero value (1 if already suspended) aborts scheduling. */
	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	/* timer_expires == 0 means "inactive", so avoid it as an expiry. */
	dev->power.timer_expires += !dev->power.timer_expires;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
670
5e928f77
RW
671/**
672 * pm_request_resume - Submit a resume request for given device.
673 * @dev: Device to resume.
674 */
675int pm_request_resume(struct device *dev)
676{
677 unsigned long flags;
678 int retval;
679
680 spin_lock_irqsave(&dev->power.lock, flags);
1bfee5bc 681 retval = __pm_runtime_resume(dev, RPM_ASYNC);
5e928f77
RW
682 spin_unlock_irqrestore(&dev->power.lock, flags);
683
684 return retval;
685}
686EXPORT_SYMBOL_GPL(pm_request_resume);
687
688/**
689 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
690 * @dev: Device to handle.
3f9af051 691 * @rpmflags: Flag bits.
5e928f77 692 *
1d531c14 693 * Increment the usage count of the device and resume it or submit a resume
3f9af051 694 * request for it, depending on the RPM_ASYNC flag bit.
5e928f77 695 */
3f9af051 696int __pm_runtime_get(struct device *dev, int rpmflags)
5e928f77 697{
1d531c14 698 int retval;
5e928f77 699
1d531c14 700 atomic_inc(&dev->power.usage_count);
3f9af051
AS
701 retval = (rpmflags & RPM_ASYNC) ?
702 pm_request_resume(dev) : pm_runtime_resume(dev);
5e928f77
RW
703
704 return retval;
705}
706EXPORT_SYMBOL_GPL(__pm_runtime_get);
707
708/**
709 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
710 * @dev: Device to handle.
3f9af051 711 * @rpmflags: Flag bits.
5e928f77
RW
712 *
713 * Decrement the usage count of the device and if it reaches zero, carry out a
714 * synchronous idle notification or submit an idle notification request for it,
3f9af051 715 * depending on the RPM_ASYNC flag bit.
5e928f77 716 */
3f9af051 717int __pm_runtime_put(struct device *dev, int rpmflags)
5e928f77
RW
718{
719 int retval = 0;
720
721 if (atomic_dec_and_test(&dev->power.usage_count))
3f9af051
AS
722 retval = (rpmflags & RPM_ASYNC) ?
723 pm_request_idle(dev) : pm_runtime_idle(dev);
5e928f77
RW
724
725 return retval;
726}
727EXPORT_SYMBOL_GPL(__pm_runtime_put);
728
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/* Only allowed while runtime PM is disabled or in an error state. */
	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		/* Nested under dev->power.lock; annotated for lockdep. */
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
808
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/* Neutralize the request, then flush the work item. */
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
854
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	/* Keep the device from being suspended while we work. */
	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
890
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		/* Already disabled; just increase the nesting depth. */
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
939
940/**
941 * pm_runtime_enable - Enable run-time PM of a device.
942 * @dev: Device to handle.
943 */
944void pm_runtime_enable(struct device *dev)
945{
946 unsigned long flags;
947
948 spin_lock_irqsave(&dev->power.lock, flags);
949
950 if (dev->power.disable_depth > 0)
951 dev->power.disable_depth--;
952 else
953 dev_warn(dev, "Unbalanced %s!\n", __func__);
954
955 spin_unlock_irqrestore(&dev->power.lock, flags);
956}
957EXPORT_SYMBOL_GPL(pm_runtime_enable);
958
53823639
RW
/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	/* The usage count reference keeps the device active from now on. */
	atomic_inc(&dev->power.usage_count);
	__pm_runtime_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
981
/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	/* Drop the reference taken by pm_runtime_forbid(). */
	if (atomic_dec_and_test(&dev->power.usage_count))
		__pm_runtime_idle(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
1002
5e928f77
RW
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	/* Runtime PM starts disabled; pm_runtime_enable() balances this. */
	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
1033
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}