/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        }                                                       \
        __ret;                                                  \
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)       \
({                                                                              \
        ktime_t __start = ktime_get();                                          \
        type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
        s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
                dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
        }                                                                       \
        __retval;                                                               \
})

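/*
 * Usage sketch (illustrative, not part of this file): a platform would
 * normally populate genpd->dev_ops with per-device callbacks before
 * registering the domain; the GENPD_DEV_*CALLBACK() wrappers above dispatch
 * to those callbacks, and the timed variant records worst-case latencies for
 * the governor.  The "foo" names below are hypothetical:
 *
 *      static int foo_stop_dev(struct device *dev)
 *      {
 *              return 0;       // e.g. gate the device's functional clock
 *      }
 *
 *      static int foo_start_dev(struct device *dev)
 *      {
 *              return 0;       // e.g. ungate the device's functional clock
 *      }
 *
 *      foo_genpd.dev_ops.stop = foo_stop_dev;
 *      foo_genpd.dev_ops.start = foo_start_dev;
 */
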
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        if (IS_ERR_OR_NULL(domain_name))
                return NULL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (!strcmp(gpd->name, domain_name)) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);
        return genpd;
}

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
                                        stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
                                        start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        bool ret = false;

        if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
                ret = !!atomic_dec_and_test(&genpd->sd_count);

        return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
        atomic_inc(&genpd->sd_count);
        smp_mb__after_atomic();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
        DEFINE_WAIT(wait);

        mutex_lock(&genpd->lock);
        /*
         * Wait for the domain to transition into either the active,
         * or the power off state.
         */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status == GPD_STATE_ACTIVE
                    || genpd->status == GPD_STATE_POWER_OFF)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
        mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
        if (genpd->resume_count == 0)
                genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
        s64 usecs64;

        if (!genpd->cpuidle_data)
                return;

        usecs64 = genpd->power_on_latency_ns;
        do_div(usecs64, NSEC_PER_USEC);
        usecs64 += genpd->cpuidle_data->saved_exit_latency;
        genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}

static int genpd_power_on(struct generic_pm_domain *genpd)
{
        ktime_t time_start;
        s64 elapsed_ns;
        int ret;

        if (!genpd->power_on)
                return 0;

        time_start = ktime_get();
        ret = genpd->power_on(genpd);
        if (ret)
                return ret;

        elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
        if (elapsed_ns <= genpd->power_on_latency_ns)
                return ret;

        genpd->power_on_latency_ns = elapsed_ns;
        genpd->max_off_time_changed = true;
        genpd_recalc_cpu_exit_latency(genpd);
        pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
                genpd->name, "on", elapsed_ns);

        return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd)
{
        ktime_t time_start;
        s64 elapsed_ns;
        int ret;

        if (!genpd->power_off)
                return 0;

        time_start = ktime_get();
        ret = genpd->power_off(genpd);
        if (ret == -EBUSY)
                return ret;

        elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
        if (elapsed_ns <= genpd->power_off_latency_ns)
                return ret;

        genpd->power_off_latency_ns = elapsed_ns;
        genpd->max_off_time_changed = true;
        pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
                genpd->name, "off", elapsed_ns);

        return ret;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct gpd_link *link;
        DEFINE_WAIT(wait);
        int ret = 0;

        /* If the domain's master is being waited for, we have to wait too. */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status != GPD_STATE_WAIT_MASTER)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);

        if (genpd->status == GPD_STATE_ACTIVE
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                return 0;

        if (genpd->status != GPD_STATE_POWER_OFF) {
                genpd_set_active(genpd);
                return 0;
        }

        if (genpd->cpuidle_data) {
                cpuidle_pause_and_lock();
                genpd->cpuidle_data->idle_state->disabled = true;
                cpuidle_resume_and_unlock();
                goto out;
        }

        /*
         * The list is guaranteed not to change while the loop below is being
         * executed, unless one of the masters' .power_on() callbacks fiddles
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_inc(link->master);
                genpd->status = GPD_STATE_WAIT_MASTER;

                mutex_unlock(&genpd->lock);

                ret = pm_genpd_poweron(link->master);

                mutex_lock(&genpd->lock);

                /*
                 * The "wait for master" status is guaranteed not to change
                 * while the master is powering on.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                wake_up_all(&genpd->status_wait_queue);
                if (ret) {
                        genpd_sd_counter_dec(link->master);
                        goto err;
                }
        }

        ret = genpd_power_on(genpd);
        if (ret)
                goto err;

 out:
        genpd_set_active(genpd);

        return 0;

 err:
        list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
                genpd_sd_counter_dec(link->master);

        return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
        int ret;

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        mutex_unlock(&genpd->lock);
        return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
        struct generic_pm_domain *genpd;

        genpd = pm_genpd_lookup_name(domain_name);
        return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}

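/*
 * Usage sketch (illustrative): platform setup code that must touch hardware
 * in a registered domain can power it up first by name; "A4MP" is just an
 * example domain name here:
 *
 *      ret = pm_genpd_name_poweron("A4MP");
 *      if (ret)
 *              pr_err("PM domain A4MP power-on failed: %d\n", ret);
 */
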
#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
                                     struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
                                        save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
                                        restore_state_latency_ns,
                                        "state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
{
        struct generic_pm_domain_data *gpd_data;
        struct device *dev;

        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

        mutex_lock(&gpd_data->lock);
        dev = gpd_data->base.dev;
        if (!dev) {
                mutex_unlock(&gpd_data->lock);
                return NOTIFY_DONE;
        }
        mutex_unlock(&gpd_data->lock);

        for (;;) {
                struct generic_pm_domain *genpd;
                struct pm_domain_data *pdd;

                spin_lock_irq(&dev->power.lock);

                pdd = dev->power.subsys_data ?
                                dev->power.subsys_data->domain_data : NULL;
                if (pdd && pdd->dev) {
                        to_gpd_data(pdd)->td.constraint_changed = true;
                        genpd = dev_to_genpd(dev);
                } else {
                        genpd = ERR_PTR(-ENODATA);
                }

                spin_unlock_irq(&dev->power.lock);

                if (!IS_ERR(genpd)) {
                        mutex_lock(&genpd->lock);
                        genpd->max_off_time_changed = true;
                        mutex_unlock(&genpd->lock);
                }

                dev = dev->parent;
                if (!dev || dev->power.ignore_children)
                        break;
        }

        return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
                                  struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int ret = 0;

        if (gpd_data->need_restore > 0)
                return 0;

        /*
         * If the value of the need_restore flag is still unknown at this point,
         * we trust that pm_genpd_poweroff() has verified that the device is
         * already runtime PM suspended.
         */
        if (gpd_data->need_restore < 0) {
                gpd_data->need_restore = 1;
                return 0;
        }

        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        ret = genpd_save_dev(genpd, dev);
        genpd_stop_dev(genpd, dev);

        mutex_lock(&genpd->lock);

        if (!ret)
                gpd_data->need_restore = 1;

        return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
                                      struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int need_restore = gpd_data->need_restore;

        gpd_data->need_restore = 0;
        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);

        /*
         * Call genpd_restore_dev() for recently added devices too (need_restore
         * is negative then).
         */
        if (need_restore)
                genpd_restore_dev(genpd, dev);

        mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is nonzero, which means that one of
 * its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
        return genpd->status == GPD_STATE_WAIT_MASTER
                || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it has already been
 * queued.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
        queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct pm_domain_data *pdd;
        struct gpd_link *link;
        unsigned int not_suspended;
        int ret = 0;

 start:
        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) The domain is waiting for its master to power up.
         * (3) One of the domain's devices is being resumed right now.
         * (4) System suspend is in progress.
         */
        if (genpd->status == GPD_STATE_POWER_OFF
            || genpd->status == GPD_STATE_WAIT_MASTER
            || genpd->resume_count > 0 || genpd->prepared_count > 0)
                return 0;

        if (atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                enum pm_qos_flags_status stat;

                stat = dev_pm_qos_flags(pdd->dev,
                                        PM_QOS_FLAG_NO_POWER_OFF
                                                | PM_QOS_FLAG_REMOTE_WAKEUP);
                if (stat > PM_QOS_FLAGS_NONE)
                        return -EBUSY;

                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
                    || pdd->dev->power.irq_safe))
                        not_suspended++;
        }

        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->poweroff_task) {
                /*
                 * Another instance of pm_genpd_poweroff() is executing
                 * callbacks, so tell it to start over and return.
                 */
                genpd->status = GPD_STATE_REPEAT;
                return 0;
        }

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        genpd->status = GPD_STATE_BUSY;
        genpd->poweroff_task = current;

        list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
                ret = atomic_read(&genpd->sd_count) == 0 ?
                        __pm_genpd_save_device(pdd, genpd) : -EBUSY;

                if (genpd_abort_poweroff(genpd))
                        goto out;

                if (ret) {
                        genpd_set_active(genpd);
                        goto out;
                }

                if (genpd->status == GPD_STATE_REPEAT) {
                        genpd->poweroff_task = NULL;
                        goto start;
                }
        }

        if (genpd->cpuidle_data) {
                /*
                 * If cpuidle_data is set, cpuidle should turn the domain off
                 * when the CPU in it is idle.  In that case we don't decrement
                 * the subdomain counts of the master domains, so that power is
                 * not removed from the current domain prematurely as a result
                 * of cutting off the masters' power.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                cpuidle_pause_and_lock();
                genpd->cpuidle_data->idle_state->disabled = false;
                cpuidle_resume_and_unlock();
                goto out;
        }

        if (genpd->power_off) {
                if (atomic_read(&genpd->sd_count) > 0) {
                        ret = -EBUSY;
                        goto out;
                }

                /*
                 * If sd_count becomes greater than 0 just after the check
                 * above, one of the subdomains has incremented it but hasn't
                 * managed to call pm_genpd_poweron() for the master yet.  In
                 * that case pm_genpd_poweron() will wait for us to drop the
                 * lock, so we can call .power_off() and let pm_genpd_poweron()
                 * restore power for us (this shouldn't happen very often).
                 */
                ret = genpd_power_off(genpd);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        goto out;
                }
        }

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                genpd_queue_power_off_work(link->master);
        }

 out:
        genpd->poweroff_task = NULL;
        wake_up_all(&genpd->status_wait_queue);
        return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        genpd_acquire_lock(genpd);
        pm_genpd_poweroff(genpd);
        genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        struct generic_pm_domain_data *gpd_data;
        bool (*stop_ok)(struct device *__dev);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;

        ret = genpd_stop_dev(genpd, dev);
        if (ret)
                return ret;

        /*
         * If power.irq_safe is set, this routine will be run with interrupts
         * off, so it can't use mutexes.
         */
        if (dev->power.irq_safe)
                return 0;

        mutex_lock(&genpd->lock);

        /*
         * If we have an unknown state of the need_restore flag, it means none
         * of the runtime PM callbacks has been invoked yet. Let's update the
         * flag to reflect that the current state is active.
         */
        gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        if (gpd_data->need_restore < 0)
                gpd_data->need_restore = 0;

        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
        mutex_unlock(&genpd->lock);

        return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        DEFINE_WAIT(wait);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
                return genpd_start_dev_no_timing(genpd, dev);

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        if (ret) {
                mutex_unlock(&genpd->lock);
                return ret;
        }
        genpd->status = GPD_STATE_BUSY;
        genpd->resume_count++;
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * If current is the powering off task, we have been called
                 * reentrantly from one of the device callbacks, so we should
                 * not wait.
                 */
                if (!genpd->poweroff_task || genpd->poweroff_task == current)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
        __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
        genpd->resume_count--;
        genpd_set_active(genpd);
        wake_up_all(&genpd->status_wait_queue);
        mutex_unlock(&genpd->lock);

        return 0;
}

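/*
 * Note (illustrative): drivers do not call the two callbacks above directly.
 * A driver whose device has been added to a generic PM domain simply uses
 * the regular runtime PM API, and the PM core routes the calls here:
 *
 *      pm_runtime_enable(dev);
 *      ret = pm_runtime_get_sync(dev); // ends up in pm_genpd_runtime_resume()
 *      ...
 *      pm_runtime_put(dev);            // may end up in pm_genpd_runtime_suspend()
 */
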
static bool pd_ignore_unused;

/* Handle the "pd_ignore_unused" kernel command line option. */
static int __init pd_ignore_unused_setup(char *__unused)
{
        pd_ignore_unused = true;
        return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
        struct generic_pm_domain *genpd;

        if (pd_ignore_unused) {
                pr_warn("genpd: Not disabling unused power domains\n");
                return;
        }

        mutex_lock(&gpd_list_lock);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
                genpd_queue_power_off_work(genpd);

        mutex_unlock(&gpd_list_lock);
}

static int __init genpd_poweroff_unused(void)
{
        pm_genpd_poweroff_unused();
        return 0;
}
late_initcall(genpd_poweroff_unused);

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                            unsigned long val, void *ptr)
{
        return NOTIFY_DONE;
}

static inline void
genpd_queue_power_off_work(struct generic_pm_domain *genpd) {}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend        NULL
#define pm_genpd_runtime_resume         NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
        const struct generic_pm_domain *gpd;

        if (IS_ERR_OR_NULL(genpd))
                return false;

        list_for_each_entry(gpd, &gpd_list, gpd_list_node)
                if (gpd == genpd)
                        return true;

        return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status == GPD_STATE_POWER_OFF)
                return;

        if (genpd->suspended_count != genpd->device_count
            || atomic_read(&genpd->sd_count) > 0)
                return;

        genpd_power_off(genpd);

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                pm_genpd_sync_poweroff(link->master);
        }
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status != GPD_STATE_POWER_OFF)
                return;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                pm_genpd_sync_poweron(link->master);
                genpd_sd_counter_inc(link->master);
        }

        genpd_power_on(genpd);

        genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
        bool active_wakeup;

        if (!device_can_wakeup(dev))
                return false;

        active_wakeup = genpd_dev_active_wakeup(genpd, dev);
        return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count++ == 0) {
                genpd->suspended_count = 0;
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
        }

        genpd_release_lock(genpd);

        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        pm_runtime_put(dev);
        return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        genpd_stop_dev(genpd, dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_sync_poweron(genpd);
        genpd->suspended_count--;

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Stop the device, unless power had already been removed from its PM domain
 * before the system transition started.  The device's pm_domain field is
 * assumed to point to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Start of thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         *
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into an arbitrary
                 * state, so make it appear as powered off to
                 * pm_genpd_sync_poweron(), which then tries to power it on
                 * in case it really was off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
                        /*
                         * If the domain was off before the hibernation, make
                         * sure it will be off going forward.
                         */
                        genpd_power_off(genpd);

                        return 0;
                }
        }

        if (genpd->suspend_power_off)
                return 0;

        pm_genpd_sync_poweron(genpd);

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
                pm_request_idle(dev);
        }
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the device's PM domain off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
        struct generic_pm_domain *genpd;

        genpd = dev_to_genpd(dev);
        if (!pm_genpd_present(genpd))
                return;

        if (suspend) {
                genpd->suspended_count++;
                pm_genpd_sync_poweroff(genpd);
        } else {
                pm_genpd_sync_poweron(genpd);
                genpd->suspended_count--;
        }
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
        genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
        genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

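/*
 * Usage sketch (illustrative): a timekeeping device that must keep running
 * until the very end of system suspend can switch its domain from its
 * syscore callbacks; "foo_timer_dev" is a hypothetical struct device pointer:
 *
 *      static void foo_timer_suspend(struct clock_event_device *ced)
 *      {
 *              pm_genpd_syscore_poweroff(foo_timer_dev);
 *      }
 *
 *      static void foo_timer_resume(struct clock_event_device *ced)
 *      {
 *              pm_genpd_syscore_poweron(foo_timer_dev);
 *      }
 */
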
#else

#define pm_genpd_prepare                NULL
#define pm_genpd_suspend                NULL
#define pm_genpd_suspend_late           NULL
#define pm_genpd_suspend_noirq          NULL
#define pm_genpd_resume_early           NULL
#define pm_genpd_resume_noirq           NULL
#define pm_genpd_resume                 NULL
#define pm_genpd_freeze                 NULL
#define pm_genpd_freeze_late            NULL
#define pm_genpd_freeze_noirq           NULL
#define pm_genpd_thaw_early             NULL
#define pm_genpd_thaw_noirq             NULL
#define pm_genpd_thaw                   NULL
#define pm_genpd_restore_noirq          NULL
#define pm_genpd_complete               NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;

        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
        if (!gpd_data)
                return NULL;

        mutex_init(&gpd_data->lock);
        gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
        dev_pm_qos_add_notifier(dev, &gpd_data->nb);
        return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
                                     struct generic_pm_domain_data *gpd_data)
{
        dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
        kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                          struct gpd_timing_data *td)
{
        struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
        struct pm_domain_data *pdd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        gpd_data_new = __pm_genpd_alloc_dev_data(dev);
        if (!gpd_data_new)
                return -ENOMEM;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(pdd, &genpd->dev_list, list_node)
                if (pdd->dev == dev) {
                        ret = -EINVAL;
                        goto out;
                }

        ret = dev_pm_get_subsys_data(dev);
        if (ret)
                goto out;

        genpd->device_count++;
        genpd->max_off_time_changed = true;

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = &genpd->domain;
        if (dev->power.subsys_data->domain_data) {
                gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        } else {
                gpd_data = gpd_data_new;
                dev->power.subsys_data->domain_data = &gpd_data->base;
        }
        gpd_data->refcount++;
        if (td)
                gpd_data->td = *td;

        spin_unlock_irq(&dev->power.lock);

        if (genpd->attach_dev)
                genpd->attach_dev(genpd, dev);

        mutex_lock(&gpd_data->lock);
        gpd_data->base.dev = dev;
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
        gpd_data->need_restore = -1;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = -1;
        mutex_unlock(&gpd_data->lock);

 out:
        genpd_release_lock(genpd);

        if (gpd_data != gpd_data_new)
                __pm_genpd_free_dev_data(dev, gpd_data_new);

        return ret;
}

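/*
 * Usage sketch (illustrative): a platform that knows a device's stop/start
 * latencies up front can pass them in, so the governor need not learn them
 * at run time.  All "foo" names and numbers below are hypothetical:
 *
 *      static struct gpd_timing_data foo_td = {
 *              .stop_latency_ns  = 20000,
 *              .start_latency_ns = 50000,
 *      };
 *
 *      ret = __pm_genpd_add_device(&foo_genpd, &pdev->dev, &foo_td);
 *
 * Passing a NULL @td (e.g. via the pm_genpd_add_device() wrapper) keeps the
 * default timing data, which is then measured at run time.
 */
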
/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
                               struct gpd_timing_data *td)
{
        return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                           struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;
        struct pm_domain_data *pdd;
        bool remove = false;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
            || IS_ERR_OR_NULL(dev->pm_domain)
            || pd_to_genpd(dev->pm_domain) != genpd)
                return -EINVAL;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        genpd->device_count--;
        genpd->max_off_time_changed = true;

        if (genpd->detach_dev)
                genpd->detach_dev(genpd, dev);

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = NULL;
        pdd = dev->power.subsys_data->domain_data;
        list_del_init(&pdd->list_node);
        gpd_data = to_gpd_data(pdd);
        if (--gpd_data->refcount == 0) {
                dev->power.subsys_data->domain_data = NULL;
                remove = true;
        }

        spin_unlock_irq(&dev->power.lock);

        mutex_lock(&gpd_data->lock);
        pdd->dev = NULL;
        mutex_unlock(&gpd_data->lock);

        genpd_release_lock(genpd);

        dev_pm_put_subsys_data(dev);
        if (remove)
                __pm_genpd_free_dev_data(dev, gpd_data);

        return 0;

 out:
        genpd_release_lock(genpd);

        return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
        struct pm_subsys_data *psd;
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        psd = dev_to_psd(dev);
        if (psd && psd->domain_data)
                to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);

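/*
 * Usage sketch (illustrative): a driver that knows its hardware context was
 * lost (or, conversely, that it is still valid) can adjust the flag so that
 * the next runtime resume does (or does not) run the restore_state callback:
 *
 *      pm_genpd_dev_need_restore(&pdev->dev, true);    // context was lost
 *      pm_genpd_dev_need_restore(&pdev->dev, false);   // context is valid
 */
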
1600 /**
1601  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1602  * @genpd: Master PM domain to add the subdomain to.
1603  * @subdomain: Subdomain to be added.
1604  */
1605 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1606                            struct generic_pm_domain *subdomain)
1607 {
1608         struct gpd_link *link;
1609         int ret = 0;
1610
1611         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1612             || genpd == subdomain)
1613                 return -EINVAL;
1614
1615  start:
1616         genpd_acquire_lock(genpd);
1617         mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1618
1619         if (subdomain->status != GPD_STATE_POWER_OFF
1620             && subdomain->status != GPD_STATE_ACTIVE) {
1621                 mutex_unlock(&subdomain->lock);
1622                 genpd_release_lock(genpd);
1623                 goto start;
1624         }
1625
1626         if (genpd->status == GPD_STATE_POWER_OFF
1627             &&  subdomain->status != GPD_STATE_POWER_OFF) {
1628                 ret = -EINVAL;
1629                 goto out;
1630         }
1631
1632         list_for_each_entry(link, &genpd->master_links, master_node) {
1633                 if (link->slave == subdomain && link->master == genpd) {
1634                         ret = -EINVAL;
1635                         goto out;
1636                 }
1637         }
1638
1639         link = kzalloc(sizeof(*link), GFP_KERNEL);
1640         if (!link) {
1641                 ret = -ENOMEM;
1642                 goto out;
1643         }
1644         link->master = genpd;
1645         list_add_tail(&link->master_node, &genpd->master_links);
1646         link->slave = subdomain;
1647         list_add_tail(&link->slave_node, &subdomain->slave_links);
1648         if (subdomain->status != GPD_STATE_POWER_OFF)
1649                 genpd_sd_counter_inc(genpd);
1650
1651  out:
1652         mutex_unlock(&subdomain->lock);
1653         genpd_release_lock(genpd);
1654
1655         return ret;
1656 }
1657
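/*
 * Editor's note: a minimal sketch with two hypothetical domain objects.
 * After the call below, child_pd behaves as a slave of parent_pd: it
 * cannot be powered on unless parent_pd is on, and parent_pd cannot be
 * powered off while child_pd is still on:
 *
 *	pm_genpd_init(&parent_pd, NULL, true);
 *	pm_genpd_init(&child_pd, NULL, true);
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 */
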
1658 /**
1659  * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain by name.
1660  * @master_name: Name of the master PM domain to add the subdomain to.
1661  * @subdomain_name: Name of the subdomain to be added.
1662  */
1663 int pm_genpd_add_subdomain_names(const char *master_name,
1664                                  const char *subdomain_name)
1665 {
1666         struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1667
1668         if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1669                 return -EINVAL;
1670
1671         mutex_lock(&gpd_list_lock);
1672         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1673                 if (!master && !strcmp(gpd->name, master_name))
1674                         master = gpd;
1675
1676                 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1677                         subdomain = gpd;
1678
1679                 if (master && subdomain)
1680                         break;
1681         }
1682         mutex_unlock(&gpd_list_lock);
1683
1684         return pm_genpd_add_subdomain(master, subdomain);
1685 }
1686
1687 /**
1688  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1689  * @genpd: Master PM domain to remove the subdomain from.
1690  * @subdomain: Subdomain to be removed.
1691  */
1692 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1693                               struct generic_pm_domain *subdomain)
1694 {
1695         struct gpd_link *link;
1696         int ret = -EINVAL;
1697
1698         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1699                 return -EINVAL;
1700
1701  start:
1702         genpd_acquire_lock(genpd);
1703
1704         list_for_each_entry(link, &genpd->master_links, master_node) {
1705                 if (link->slave != subdomain)
1706                         continue;
1707
1708                 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1709
1710                 if (subdomain->status != GPD_STATE_POWER_OFF
1711                     && subdomain->status != GPD_STATE_ACTIVE) {
1712                         mutex_unlock(&subdomain->lock);
1713                         genpd_release_lock(genpd);
1714                         goto start;
1715                 }
1716
1717                 list_del(&link->master_node);
1718                 list_del(&link->slave_node);
1719                 kfree(link);
1720                 if (subdomain->status != GPD_STATE_POWER_OFF)
1721                         genpd_sd_counter_dec(genpd);
1722
1723                 mutex_unlock(&subdomain->lock);
1724
1725                 ret = 0;
1726                 break;
1727         }
1728
1729         genpd_release_lock(genpd);
1730
1731         return ret;
1732 }
1733
1734 /**
1735  * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1736  * @genpd: PM domain to be connected with cpuidle.
1737  * @state: cpuidle state this domain can disable/enable.
1738  *
1739  * Make a PM domain behave as though it contained a CPU core, that is, instead
1740  * of calling its power down routine it will enable the given cpuidle state so
1741  * that the cpuidle subsystem can power it down (if possible and desirable).
1742  */
1743 int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1744 {
1745         struct cpuidle_driver *cpuidle_drv;
1746         struct gpd_cpuidle_data *cpuidle_data;
1747         struct cpuidle_state *idle_state;
1748         int ret = 0;
1749
1750         if (IS_ERR_OR_NULL(genpd) || state < 0)
1751                 return -EINVAL;
1752
1753         genpd_acquire_lock(genpd);
1754
1755         if (genpd->cpuidle_data) {
1756                 ret = -EEXIST;
1757                 goto out;
1758         }
1759         cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
1760         if (!cpuidle_data) {
1761                 ret = -ENOMEM;
1762                 goto out;
1763         }
1764         cpuidle_drv = cpuidle_driver_ref();
1765         if (!cpuidle_drv) {
1766                 ret = -ENODEV;
1767                 goto err_drv;
1768         }
1769         if (cpuidle_drv->state_count <= state) {
1770                 ret = -EINVAL;
1771                 goto err;
1772         }
1773         idle_state = &cpuidle_drv->states[state];
1774         if (!idle_state->disabled) {
1775                 ret = -EAGAIN;
1776                 goto err;
1777         }
1778         cpuidle_data->idle_state = idle_state;
1779         cpuidle_data->saved_exit_latency = idle_state->exit_latency;
1780         genpd->cpuidle_data = cpuidle_data;
1781         genpd_recalc_cpu_exit_latency(genpd);
1782
1783  out:
1784         genpd_release_lock(genpd);
1785         return ret;
1786
1787  err:
1788         cpuidle_driver_unref();
1789
1790  err_drv:
1791         kfree(cpuidle_data);
1792         goto out;
1793 }
1794
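/*
 * Editor's note: an illustrative call with a hypothetical domain object
 * and a purely illustrative state index.  Note the constraint enforced
 * above: the target cpuidle state must have been registered as
 * disabled, otherwise -EAGAIN is returned:
 *
 *	ret = pm_genpd_attach_cpuidle(&foo_cpu_pd, 1);
 *	if (ret)
 *		pr_warn("Failed to attach domain to cpuidle: %d\n", ret);
 */
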
1795 /**
1796  * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1797  * @name: Name of the domain to connect to cpuidle.
1798  * @state: cpuidle state this domain can manipulate.
1799  */
1800 int pm_genpd_name_attach_cpuidle(const char *name, int state)
1801 {
1802         return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1803 }
1804
1805 /**
1806  * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1807  * @genpd: PM domain to remove the cpuidle connection from.
1808  *
1809  * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1810  * given PM domain.
1811  */
1812 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1813 {
1814         struct gpd_cpuidle_data *cpuidle_data;
1815         struct cpuidle_state *idle_state;
1816         int ret = 0;
1817
1818         if (IS_ERR_OR_NULL(genpd))
1819                 return -EINVAL;
1820
1821         genpd_acquire_lock(genpd);
1822
1823         cpuidle_data = genpd->cpuidle_data;
1824         if (!cpuidle_data) {
1825                 ret = -ENODEV;
1826                 goto out;
1827         }
1828         idle_state = cpuidle_data->idle_state;
1829         if (!idle_state->disabled) {
1830                 ret = -EAGAIN;
1831                 goto out;
1832         }
1833         idle_state->exit_latency = cpuidle_data->saved_exit_latency;
1834         cpuidle_driver_unref();
1835         genpd->cpuidle_data = NULL;
1836         kfree(cpuidle_data);
1837
1838  out:
1839         genpd_release_lock(genpd);
1840         return ret;
1841 }
1842
1843 /**
1844  * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1845  * @name: Name of the domain to disconnect cpuidle from.
1846  */
1847 int pm_genpd_name_detach_cpuidle(const char *name)
1848 {
1849         return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1850 }
1851
1852 /* Default device callbacks for generic PM domains. */
1853
1854 /**
1855  * pm_genpd_default_save_state - Default "save device state" for PM domains.
1856  * @dev: Device to handle.
1857  */
1858 static int pm_genpd_default_save_state(struct device *dev)
1859 {
1860         int (*cb)(struct device *__dev);
1861
1862         if (dev->type && dev->type->pm)
1863                 cb = dev->type->pm->runtime_suspend;
1864         else if (dev->class && dev->class->pm)
1865                 cb = dev->class->pm->runtime_suspend;
1866         else if (dev->bus && dev->bus->pm)
1867                 cb = dev->bus->pm->runtime_suspend;
1868         else
1869                 cb = NULL;
1870
1871         if (!cb && dev->driver && dev->driver->pm)
1872                 cb = dev->driver->pm->runtime_suspend;
1873
1874         return cb ? cb(dev) : 0;
1875 }
1876
1877 /**
1878  * pm_genpd_default_restore_state - Default PM domains "restore device state".
1879  * @dev: Device to handle.
1880  */
1881 static int pm_genpd_default_restore_state(struct device *dev)
1882 {
1883         int (*cb)(struct device *__dev);
1884
1885         if (dev->type && dev->type->pm)
1886                 cb = dev->type->pm->runtime_resume;
1887         else if (dev->class && dev->class->pm)
1888                 cb = dev->class->pm->runtime_resume;
1889         else if (dev->bus && dev->bus->pm)
1890                 cb = dev->bus->pm->runtime_resume;
1891         else
1892                 cb = NULL;
1893
1894         if (!cb && dev->driver && dev->driver->pm)
1895                 cb = dev->driver->pm->runtime_resume;
1896
1897         return cb ? cb(dev) : 0;
1898 }
1899
1900 /**
1901  * pm_genpd_init - Initialize a generic I/O PM domain object.
1902  * @genpd: PM domain object to initialize.
1903  * @gov: PM domain governor to associate with the domain (may be NULL).
1904  * @is_off: Initial power state of the domain (true if it starts powered off).
1905  */
1906 void pm_genpd_init(struct generic_pm_domain *genpd,
1907                    struct dev_power_governor *gov, bool is_off)
1908 {
1909         if (IS_ERR_OR_NULL(genpd))
1910                 return;
1911
1912         INIT_LIST_HEAD(&genpd->master_links);
1913         INIT_LIST_HEAD(&genpd->slave_links);
1914         INIT_LIST_HEAD(&genpd->dev_list);
1915         mutex_init(&genpd->lock);
1916         genpd->gov = gov;
1917         INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1918         genpd->in_progress = 0;
1919         atomic_set(&genpd->sd_count, 0);
1920         genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1921         init_waitqueue_head(&genpd->status_wait_queue);
1922         genpd->poweroff_task = NULL;
1923         genpd->resume_count = 0;
1924         genpd->device_count = 0;
1925         genpd->max_off_time_ns = -1;
1926         genpd->max_off_time_changed = true;
1927         genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1928         genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1929         genpd->domain.ops.prepare = pm_genpd_prepare;
1930         genpd->domain.ops.suspend = pm_genpd_suspend;
1931         genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1932         genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1933         genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1934         genpd->domain.ops.resume_early = pm_genpd_resume_early;
1935         genpd->domain.ops.resume = pm_genpd_resume;
1936         genpd->domain.ops.freeze = pm_genpd_freeze;
1937         genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1938         genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1939         genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1940         genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1941         genpd->domain.ops.thaw = pm_genpd_thaw;
1942         genpd->domain.ops.poweroff = pm_genpd_suspend;
1943         genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1944         genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1945         genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1946         genpd->domain.ops.restore_early = pm_genpd_resume_early;
1947         genpd->domain.ops.restore = pm_genpd_resume;
1948         genpd->domain.ops.complete = pm_genpd_complete;
1949         genpd->dev_ops.save_state = pm_genpd_default_save_state;
1950         genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1951         mutex_lock(&gpd_list_lock);
1952         list_add(&genpd->gpd_list_node, &gpd_list);
1953         mutex_unlock(&gpd_list_lock);
1954 }
1955
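/*
 * Editor's note: a minimal setup sketch with hypothetical "foo" names
 * (foo_pd_set_power() is an invented helper).  A platform fills in its
 * provider-specific callbacks and then calls pm_genpd_init(), here with
 * no governor and with the domain initially powered off:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *genpd)
 *	{
 *		return foo_pd_set_power(genpd, true);
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *genpd)
 *	{
 *		return foo_pd_set_power(genpd, false);
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 */
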
1956 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1957 /*
1958  * Device Tree based PM domain providers.
1959  *
1960  * The code below implements generic device tree based PM domain providers that
1961  * bind device tree nodes with generic PM domains registered in the system.
1962  *
1963  * Any driver that registers generic PM domains and needs to support binding of
1964  * devices to these domains is supposed to register a PM domain provider, which
1965  * maps a PM domain specifier retrieved from the device tree to a PM domain.
1966  *
1967  * Two simple mapping functions have been provided for convenience:
1968  *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1969  *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1970  *    index.
1971  */
1972
1973 /**
1974  * struct of_genpd_provider - PM domain provider registration structure
1975  * @link: Entry in global list of PM domain providers
1976  * @node: Pointer to device tree node of PM domain provider
1977  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1978  *         into a PM domain.
1979  * @data: context pointer to be passed into @xlate callback
1980  */
1981 struct of_genpd_provider {
1982         struct list_head link;
1983         struct device_node *node;
1984         genpd_xlate_t xlate;
1985         void *data;
1986 };
1987
1988 /* List of registered PM domain providers. */
1989 static LIST_HEAD(of_genpd_providers);
1990 /* Mutex to protect the list above. */
1991 static DEFINE_MUTEX(of_genpd_mutex);
1992
1993 /**
1994  * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1995  * @genpdspec: OF phandle args to map into a PM domain
1996  * @data: xlate function private data - pointer to struct generic_pm_domain
1997  *
1998  * This is a generic xlate function that can be used to model PM domains that
1999  * have their own device tree nodes. The xlate function's private data must
2000  * be a valid pointer to struct generic_pm_domain.
2001  */
2002 struct generic_pm_domain *__of_genpd_xlate_simple(
2003                                         struct of_phandle_args *genpdspec,
2004                                         void *data)
2005 {
2006         if (genpdspec->args_count != 0)
2007                 return ERR_PTR(-EINVAL);
2008         return data;
2009 }
2010 EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
2011
2012 /**
2013  * __of_genpd_xlate_onecell() - Xlate function using a single index.
2014  * @genpdspec: OF phandle args to map into a PM domain
2015  * @data: xlate function private data - pointer to struct genpd_onecell_data
2016  *
2017  * This is a generic xlate function that can be used to model simple PM domain
2018  * controllers that have one device tree node and provide multiple PM domains.
2019  * A single cell is used as an index into an array of PM domains specified in
2020  * the genpd_onecell_data struct when registering the provider.
2021  */
2022 struct generic_pm_domain *__of_genpd_xlate_onecell(
2023                                         struct of_phandle_args *genpdspec,
2024                                         void *data)
2025 {
2026         struct genpd_onecell_data *genpd_data = data;
2027         unsigned int idx = genpdspec->args[0];
2028
2029         if (genpdspec->args_count != 1)
2030                 return ERR_PTR(-EINVAL);
2031
2032         if (idx >= genpd_data->num_domains) {
2033                 pr_err("%s: invalid domain index %u\n", __func__, idx);
2034                 return ERR_PTR(-EINVAL);
2035         }
2036
2037         if (!genpd_data->domains[idx])
2038                 return ERR_PTR(-ENOENT);
2039
2040         return genpd_data->domains[idx];
2041 }
2042 EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
2043
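/*
 * Editor's note: a hedged sketch of the onecell case with hypothetical
 * "foo" names.  The provider driver fills an array of domains and
 * registers it together with this xlate function; a consumer then
 * selects one domain with a single index cell in its "power-domains"
 * property:
 *
 *	static struct generic_pm_domain *foo_domains[4];
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = __of_genpd_add_provider(np, __of_genpd_xlate_onecell,
 *				      &foo_onecell_data);
 */
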
2044 /**
2045  * __of_genpd_add_provider() - Register a PM domain provider for a node
2046  * @np: Device node pointer associated with the PM domain provider.
2047  * @xlate: Callback for decoding PM domain from phandle arguments.
2048  * @data: Context pointer for @xlate callback.
2049  */
2050 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2051                         void *data)
2052 {
2053         struct of_genpd_provider *cp;
2054
2055         cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2056         if (!cp)
2057                 return -ENOMEM;
2058
2059         cp->node = of_node_get(np);
2060         cp->data = data;
2061         cp->xlate = xlate;
2062
2063         mutex_lock(&of_genpd_mutex);
2064         list_add(&cp->link, &of_genpd_providers);
2065         mutex_unlock(&of_genpd_mutex);
2066         pr_debug("Added domain provider from %s\n", np->full_name);
2067
2068         return 0;
2069 }
2070 EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
2071
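/*
 * Editor's note: a registration sketch for the 1:1 case, using only
 * symbols defined in this file (hypothetical foo_pd; @np would be the
 * provider's own device tree node).  pm_domain.h should also carry
 * of_genpd_add_provider_simple()-style wrappers around this call:
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 *	ret = __of_genpd_add_provider(np, __of_genpd_xlate_simple, &foo_pd);
 *
 * with the provider dropped again via of_genpd_del_provider(np), below,
 * when the driver goes away.
 */
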
2072 /**
2073  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2074  * @np: Device node pointer associated with the PM domain provider
2075  */
2076 void of_genpd_del_provider(struct device_node *np)
2077 {
2078         struct of_genpd_provider *cp;
2079
2080         mutex_lock(&of_genpd_mutex);
2081         list_for_each_entry(cp, &of_genpd_providers, link) {
2082                 if (cp->node == np) {
2083                         list_del(&cp->link);
2084                         of_node_put(cp->node);
2085                         kfree(cp);
2086                         break;
2087                 }
2088         }
2089         mutex_unlock(&of_genpd_mutex);
2090 }
2091 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2092
2093 /**
2094  * of_genpd_get_from_provider() - Look up a PM domain from a registered provider
2095  * @genpdspec: OF phandle args to use for the look-up
2096  *
2097  * Looks for a PM domain provider under the node specified by @genpdspec and,
2098  * if one is found, uses the provider's xlate function to map the phandle args
2099  * to a PM domain.
2100  *
2101  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2102  * on failure.
2103  */
2104 static struct generic_pm_domain *of_genpd_get_from_provider(
2105                                         struct of_phandle_args *genpdspec)
2106 {
2107         struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2108         struct of_genpd_provider *provider;
2109
2110         mutex_lock(&of_genpd_mutex);
2111
2112         /* Check if we have such a provider in our array */
2113         list_for_each_entry(provider, &of_genpd_providers, link) {
2114                 if (provider->node == genpdspec->np)
2115                         genpd = provider->xlate(genpdspec, provider->data);
2116                 if (!IS_ERR(genpd))
2117                         break;
2118         }
2119
2120         mutex_unlock(&of_genpd_mutex);
2121
2122         return genpd;
2123 }
2124
2125 /**
2126  * genpd_dev_pm_detach - Detach a device from its PM domain.
2127  * @dev: Device to detach.
2128  * @power_off: Currently not used.
2129  *
2130  * Try to locate the generic PM domain that the device was previously
2131  * attached to. If one is found, the device is detached from it.
2132  */
2133 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2134 {
2135         struct generic_pm_domain *pd = NULL, *gpd;
2136         int ret = 0;
2137
2138         if (!dev->pm_domain)
2139                 return;
2140
2141         mutex_lock(&gpd_list_lock);
2142         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2143                 if (&gpd->domain == dev->pm_domain) {
2144                         pd = gpd;
2145                         break;
2146                 }
2147         }
2148         mutex_unlock(&gpd_list_lock);
2149
2150         if (!pd)
2151                 return;
2152
2153         dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2154
2155         while (1) {
2156                 ret = pm_genpd_remove_device(pd, dev);
2157                 if (ret != -EAGAIN)
2158                         break;
2159                 cond_resched();
2160         }
2161
2162         if (ret < 0) {
2163                 dev_err(dev, "failed to remove from PM domain %s: %d\n",
2164                         pd->name, ret);
2165                 return;
2166         }
2167
2168         /* Check if PM domain can be powered off after removing this device. */
2169         genpd_queue_power_off_work(pd);
2170 }
2171
2172 /**
2173  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2174  * @dev: Device to attach.
2175  *
2176  * Parse the device's OF node to find a PM domain specifier. If one is found,
2177  * attach the device to the pm_domain ops retrieved from the provider.
2178  *
2179  * Both generic and legacy Samsung-specific DT bindings are supported to keep
2180  * backwards compatibility with existing DTBs.
2181  *
2182  * Returns 0 on a successful attach to a PM domain or a negative error code.
2183  */
2184 int genpd_dev_pm_attach(struct device *dev)
2185 {
2186         struct of_phandle_args pd_args;
2187         struct generic_pm_domain *pd;
2188         int ret;
2189
2190         if (!dev->of_node)
2191                 return -ENODEV;
2192
2193         if (dev->pm_domain)
2194                 return -EEXIST;
2195
2196         ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2197                                         "#power-domain-cells", 0, &pd_args);
2198         if (ret < 0) {
2199                 if (ret != -ENOENT)
2200                         return ret;
2201
2202                 /*
2203                  * Try legacy Samsung-specific bindings
2204                  * (for backwards compatibility of DT ABI)
2205                  */
2206                 pd_args.args_count = 0;
2207                 pd_args.np = of_parse_phandle(dev->of_node,
2208                                                 "samsung,power-domain", 0);
2209                 if (!pd_args.np)
2210                         return -ENOENT;
2211         }
2212
2213         pd = of_genpd_get_from_provider(&pd_args);
2214         of_node_put(pd_args.np);        /* done with the specifier node */
2215         if (IS_ERR(pd)) {
2216                 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2217                         __func__, PTR_ERR(pd));
2218                 return PTR_ERR(pd);
2219         }
2220
2221         dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2222
2223         while (1) {
2224                 ret = pm_genpd_add_device(pd, dev);
2225                 if (ret != -EAGAIN)
2226                         break;
2227                 cond_resched();
2228         }
2229
2230         if (ret < 0) {
2231                 dev_err(dev, "failed to add to PM domain %s: %d\n",
2232                         pd->name, ret);
2233                 /* pd_args.np was dropped right after the look-up above */
2234                 return ret;
2235         }
2236
2237         dev->pm_domain->detach = genpd_dev_pm_detach;
2238
2239         return 0;
2240 }
2241 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
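
/*
 * Editor's note: a hedged sketch of one plausible caller, not lifted
 * from an in-tree bus (hypothetical foo_* names).  In this kernel
 * generation the platform bus reaches this function indirectly, via
 * dev_pm_domain_attach(), before probing a device:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		int ret = genpd_dev_pm_attach(dev);
 *
 *		if (ret && ret != -ENOENT && ret != -ENODEV)
 *			return ret;
 *		return foo_do_probe(dev);
 *	}
 */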
2242 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2243
2244
2245 /***        debugfs support        ***/
2246
2247 #ifdef CONFIG_PM_ADVANCED_DEBUG
2248 #include <linux/pm.h>
2249 #include <linux/device.h>
2250 #include <linux/debugfs.h>
2251 #include <linux/seq_file.h>
2252 #include <linux/init.h>
2253 #include <linux/kobject.h>
2254 static struct dentry *pm_genpd_debugfs_dir;
2255
2256 /*
2257  * TODO: This function is a slightly modified version of rtpm_status_show
2258  * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME
2259  * are too loose to generalize it.
2260  */
2261 #ifdef CONFIG_PM_RUNTIME
2262 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2263 {
2264         static const char * const status_lookup[] = {
2265                 [RPM_ACTIVE] = "active",
2266                 [RPM_RESUMING] = "resuming",
2267                 [RPM_SUSPENDED] = "suspended",
2268                 [RPM_SUSPENDING] = "suspending"
2269         };
2270         const char *p = "";
2271
2272         if (dev->power.runtime_error)
2273                 p = "error";
2274         else if (dev->power.disable_depth)
2275                 p = "unsupported";
2276         else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2277                 p = status_lookup[dev->power.runtime_status];
2278         else
2279                 WARN_ON(1);
2280
2281         seq_puts(s, p);
2282 }
2283 #else
2284 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2285 {
2286         seq_puts(s, "active");
2287 }
2288 #endif
2289
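/* Print one domain's name and status along with its slaves and devices. */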
2290 static int pm_genpd_summary_one(struct seq_file *s,
2291                 struct generic_pm_domain *gpd)
2292 {
2293         static const char * const status_lookup[] = {
2294                 [GPD_STATE_ACTIVE] = "on",
2295                 [GPD_STATE_WAIT_MASTER] = "wait-master",
2296                 [GPD_STATE_BUSY] = "busy",
2297                 [GPD_STATE_REPEAT] = "off-in-progress",
2298                 [GPD_STATE_POWER_OFF] = "off"
2299         };
2300         struct pm_domain_data *pm_data;
2301         const char *kobj_path;
2302         struct gpd_link *link;
2303         int ret;
2304
2305         ret = mutex_lock_interruptible(&gpd->lock);
2306         if (ret)
2307                 return -ERESTARTSYS;
2308
2309         if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
2310                 goto exit;
2311         seq_printf(s, "%-30s  %-15s  ", gpd->name, status_lookup[gpd->status]);
2312
2313         /*
2314          * Modifications on the list require holding locks on both
2315          * master and slave, so we are safe.
2316          * Also gpd->name is immutable.
2317          */
2318         list_for_each_entry(link, &gpd->master_links, master_node) {
2319                 seq_printf(s, "%s", link->slave->name);
2320                 if (!list_is_last(&link->master_node, &gpd->master_links))
2321                         seq_puts(s, ", ");
2322         }
2323
2324         list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
2325                 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
2326                 if (kobj_path == NULL)
2327                         continue;
2328
2329                 seq_printf(s, "\n    %-50s  ", kobj_path);
2330                 rtpm_status_str(s, pm_data->dev);
2331                 kfree(kobj_path);
2332         }
2333
2334         seq_puts(s, "\n");
2335 exit:
2336         mutex_unlock(&gpd->lock);
2337
2338         return 0;
2339 }
2340
2341 static int pm_genpd_summary_show(struct seq_file *s, void *data)
2342 {
2343         struct generic_pm_domain *gpd;
2344         int ret = 0;
2345
2346         seq_puts(s, "    domain                      status         slaves\n");
2347         seq_puts(s, "           /device                                      runtime status\n");
2348         seq_puts(s, "----------------------------------------------------------------------\n");
2349
2350         ret = mutex_lock_interruptible(&gpd_list_lock);
2351         if (ret)
2352                 return -ERESTARTSYS;
2353
2354         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2355                 ret = pm_genpd_summary_one(s, gpd);
2356                 if (ret)
2357                         break;
2358         }
2359         mutex_unlock(&gpd_list_lock);
2360
2361         return ret;
2362 }
2363
2364 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2365 {
2366         return single_open(file, pm_genpd_summary_show, NULL);
2367 }
2368
2369 static const struct file_operations pm_genpd_summary_fops = {
2370         .open = pm_genpd_summary_open,
2371         .read = seq_read,
2372         .llseek = seq_lseek,
2373         .release = single_release,
2374 };
2375
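/*
 * Editor's note: given the formats used in pm_genpd_summary_one() above,
 * reading the file created below (with debugfs mounted at the usual
 * /sys/kernel/debug) produces lines shaped roughly like this
 * illustrative sample:
 *
 *	foo-pd                          on               bar-pd
 *	    /devices/platform/foo.0                          suspended
 */
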
2376 static int __init pm_genpd_debug_init(void)
2377 {
2378         struct dentry *d;
2379
2380         pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2381
2382         if (!pm_genpd_debugfs_dir)
2383                 return -ENOMEM;
2384
2385         d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2386                         pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2387         if (!d)
2388                 return -ENOMEM;
2389
2390         return 0;
2391 }
2392 late_initcall(pm_genpd_debug_init);
2393
2394 static void __exit pm_genpd_debug_exit(void)
2395 {
2396         debugfs_remove_recursive(pm_genpd_debugfs_dir);
2397 }
2398 __exitcall(pm_genpd_debug_exit);
2399 #endif /* CONFIG_PM_ADVANCED_DEBUG */