PM / OPP: Introduce dev_pm_opp_get_max_volt_latency()
drivers/base/power/opp/core.c
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/regulator/consumer.h>

#include "opp.h"

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(dev_opp_list_lock);

#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&dev_opp_list_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "dev_opp_list_lock protection");		\
} while (0)

static struct device_list_opp *_find_list_dev(const struct device *dev,
					      struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	list_for_each_entry(list_dev, &dev_opp->dev_list, node)
		if (list_dev->dev == dev)
			return list_dev;

	return NULL;
}

static struct device_opp *_managed_opp(const struct device_node *np)
{
	struct device_opp *dev_opp;

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
		if (dev_opp->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			return dev_opp->shared_opp ? dev_opp : NULL;
		}
	}

	return NULL;
}

/**
 * _find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does a RCU
 * reader operation to grab the pointer needed.
 *
 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: For readers, this function must be called under rcu_read_lock().
 * device_opp is a RCU protected pointer, which means that device_opp is valid
 * as long as we are under RCU lock.
 *
 * For Writers, this function must be called with dev_opp_list_lock held.
 */
struct device_opp *_find_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	opp_rcu_lockdep_assert();

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
		if (_find_list_dev(dev, dev_opp))
			return dev_opp;

	return ERR_PTR(-ENODEV);
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp:	opp for which voltage has to be returned for
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long v = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp))
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which frequency has to be returned for
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long f = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);

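/*
 * Illustrative usage (assumption, not part of this file's API): a consumer
 * such as a cpufreq or devfreq driver is expected to wrap the
 * dev_pm_opp_find_freq_*() helpers (defined later in this file) and the
 * dev_pm_opp_get_voltage()/dev_pm_opp_get_freq() accessors above in an RCU
 * read-side critical section, roughly like the hypothetical helper below.
 * Names such as "example_get_opp_voltage" and "target_freq" are placeholders.
 *
 *	static int example_get_opp_voltage(struct device *dev,
 *					   unsigned long *target_freq,
 *					   unsigned long *volt)
 *	{
 *		struct dev_pm_opp *opp;
 *
 *		rcu_read_lock();
 *		opp = dev_pm_opp_find_freq_ceil(dev, target_freq);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		*volt = dev_pm_opp_get_voltage(opp);
 *		rcu_read_unlock();
 *		return 0;
 *	}
 */
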
/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp:	opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short duration of times to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return tmp_opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct device_opp *dev_opp;
	unsigned long clock_latency_ns;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		clock_latency_ns = 0;
	else
		clock_latency_ns = dev_opp->clock_latency_ns_max;

	rcu_read_unlock();
	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	unsigned long min_uV = ~0, max_uV = 0;
	int ret;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		rcu_read_unlock();
		return 0;
	}

	reg = dev_opp->regulator;
	if (IS_ERR_OR_NULL(reg)) {
		/* Regulator may not be required for device */
		if (reg)
			dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
				PTR_ERR(reg));
		rcu_read_unlock();
		return 0;
	}

	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (!opp->available)
			continue;

		if (opp->u_volt_min < min_uV)
			min_uV = opp->u_volt_min;
		if (opp->u_volt_max > max_uV)
			max_uV = opp->u_volt_max;
	}

	rcu_read_unlock();

	/*
	 * The caller needs to ensure that dev_opp (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	ret = regulator_set_voltage_time(reg, min_uV, max_uV);
	if (ret > 0)
		latency_ns = ret * 1000;

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

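/*
 * Illustrative usage (assumption, not taken from this commit): a governor or
 * cpufreq-style driver could combine the two helpers above to estimate the
 * worst-case DVFS transition latency for a device, e.g.:
 *
 *	unsigned long total_latency_ns;
 *
 *	total_latency_ns = dev_pm_opp_get_max_clock_latency(dev);
 *	total_latency_ns += dev_pm_opp_get_max_volt_latency(dev);
 *
 * Each helper returns 0 when the information is unavailable (no OPP table,
 * or no regulator in the voltage case), so the sum degrades gracefully to
 * the clock-only latency.
 */
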
292/**
293 * dev_pm_opp_get_suspend_opp() - Get suspend opp
294 * @dev: device for which we do this operation
295 *
296 * Return: This function returns pointer to the suspend opp if it is
1b2b90cb 297 * defined and available, otherwise it returns NULL.
298 *
299 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
300 * protected pointer. The reason for the same is that the opp pointer which is
301 * returned will remain valid for use with opp_get_{voltage, freq} only while
302 * under the locked area. The pointer returned must be used prior to unlocking
303 * with rcu_read_unlock() to maintain the integrity of the pointer.
304 */
305struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
306{
307 struct device_opp *dev_opp;
308
309 opp_rcu_lockdep_assert();
310
311 dev_opp = _find_device_opp(dev);
312 if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
313 !dev_opp->suspend_opp->available)
314 return NULL;
4eafbd15 315
1b2b90cb 316 return dev_opp->suspend_opp;
317}
318EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
319
e1f60b29 320/**
5d4879cd 321 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
322 * @dev: device for which we do this operation
323 *
984f16c8 324 * Return: This function returns the number of available opps if there are any,
325 * else returns 0 if none or the corresponding error value.
326 *
b4718c02 327 * Locking: This function takes rcu_read_lock().
e1f60b29 328 */
5d4879cd 329int dev_pm_opp_get_opp_count(struct device *dev)
330{
331 struct device_opp *dev_opp;
47d43ba7 332 struct dev_pm_opp *temp_opp;
333 int count = 0;
334
b4718c02 335 rcu_read_lock();
b02ded24 336
327854c8 337 dev_opp = _find_device_opp(dev);
e1f60b29 338 if (IS_ERR(dev_opp)) {
339 count = PTR_ERR(dev_opp);
340 dev_err(dev, "%s: device OPP not found (%d)\n",
341 __func__, count);
342 goto out_unlock;
343 }
344
345 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
346 if (temp_opp->available)
347 count++;
348 }
349
350out_unlock:
351 rcu_read_unlock();
352 return count;
353}
5d4879cd 354EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
355
356/**
5d4879cd 357 * dev_pm_opp_find_freq_exact() - search for an exact frequency
358 * @dev: device for which we do this operation
359 * @freq: frequency to search for
7ae49618 360 * @available: true/false - match for available opp
e1f60b29 361 *
362 * Return: Searches for exact match in the opp list and returns pointer to the
363 * matching opp if found, else returns ERR_PTR in case of error and should
364 * be handled using IS_ERR. Error return values can be:
365 * EINVAL: for bad pointer
366 * ERANGE: no match found for search
367 * ENODEV: if device not found in list of registered devices
368 *
369 * Note: available is a modifier for the search. if available=true, then the
370 * match is for exact matching frequency and is available in the stored OPP
371 * table. if false, the match is for exact frequency which is not available.
372 *
373 * This provides a mechanism to enable an opp which is not available currently
374 * or the opposite as well.
375 *
376 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
377 * protected pointer. The reason for the same is that the opp pointer which is
378 * returned will remain valid for use with opp_get_{voltage, freq} only while
379 * under the locked area. The pointer returned must be used prior to unlocking
380 * with rcu_read_unlock() to maintain the integrity of the pointer.
381 */
382struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
383 unsigned long freq,
384 bool available)
385{
386 struct device_opp *dev_opp;
47d43ba7 387 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
e1f60b29 388
389 opp_rcu_lockdep_assert();
390
327854c8 391 dev_opp = _find_device_opp(dev);
392 if (IS_ERR(dev_opp)) {
393 int r = PTR_ERR(dev_opp);
394 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
395 return ERR_PTR(r);
396 }
397
398 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
399 if (temp_opp->available == available &&
400 temp_opp->rate == freq) {
401 opp = temp_opp;
402 break;
403 }
404 }
405
406 return opp;
407}
5d4879cd 408EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
409
410/**
5d4879cd 411 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
412 * @dev: device for which we do this operation
413 * @freq: Start frequency
414 *
415 * Search for the matching ceil *available* OPP from a starting freq
416 * for a device.
417 *
984f16c8 418 * Return: matching *opp and refreshes *freq accordingly, else returns
419 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
420 * values can be:
421 * EINVAL: for bad pointer
422 * ERANGE: no match found for search
423 * ENODEV: if device not found in list of registered devices
424 *
425 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
426 * protected pointer. The reason for the same is that the opp pointer which is
427 * returned will remain valid for use with opp_get_{voltage, freq} only while
428 * under the locked area. The pointer returned must be used prior to unlocking
429 * with rcu_read_unlock() to maintain the integrity of the pointer.
430 */
431struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
432 unsigned long *freq)
433{
434 struct device_opp *dev_opp;
47d43ba7 435 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
e1f60b29 436
437 opp_rcu_lockdep_assert();
438
439 if (!dev || !freq) {
440 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
441 return ERR_PTR(-EINVAL);
442 }
443
327854c8 444 dev_opp = _find_device_opp(dev);
e1f60b29 445 if (IS_ERR(dev_opp))
0779726c 446 return ERR_CAST(dev_opp);
447
448 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
449 if (temp_opp->available && temp_opp->rate >= *freq) {
450 opp = temp_opp;
451 *freq = opp->rate;
452 break;
453 }
454 }
455
456 return opp;
457}
5d4879cd 458EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
459
460/**
5d4879cd 461 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
462 * @dev: device for which we do this operation
463 * @freq: Start frequency
464 *
465 * Search for the matching floor *available* OPP from a starting freq
466 * for a device.
467 *
984f16c8 468 * Return: matching *opp and refreshes *freq accordingly, else returns
469 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
470 * values can be:
471 * EINVAL: for bad pointer
472 * ERANGE: no match found for search
473 * ENODEV: if device not found in list of registered devices
474 *
475 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
476 * protected pointer. The reason for the same is that the opp pointer which is
477 * returned will remain valid for use with opp_get_{voltage, freq} only while
478 * under the locked area. The pointer returned must be used prior to unlocking
479 * with rcu_read_unlock() to maintain the integrity of the pointer.
480 */
481struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
482 unsigned long *freq)
483{
484 struct device_opp *dev_opp;
47d43ba7 485 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
e1f60b29 486
487 opp_rcu_lockdep_assert();
488
489 if (!dev || !freq) {
490 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
491 return ERR_PTR(-EINVAL);
492 }
493
327854c8 494 dev_opp = _find_device_opp(dev);
e1f60b29 495 if (IS_ERR(dev_opp))
0779726c 496 return ERR_CAST(dev_opp);
497
498 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
499 if (temp_opp->available) {
500 /* go to the next node, before choosing prev */
501 if (temp_opp->rate > *freq)
502 break;
503 else
504 opp = temp_opp;
505 }
506 }
507 if (!IS_ERR(opp))
508 *freq = opp->rate;
509
510 return opp;
511}
5d4879cd 512EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
e1f60b29 513
06441658
VK
514/* List-dev Helpers */
515static void _kfree_list_dev_rcu(struct rcu_head *head)
516{
517 struct device_list_opp *list_dev;
518
519 list_dev = container_of(head, struct device_list_opp, rcu_head);
520 kfree_rcu(list_dev, rcu_head);
521}
522
523static void _remove_list_dev(struct device_list_opp *list_dev,
524 struct device_opp *dev_opp)
525{
deaa5146 526 opp_debug_unregister(list_dev, dev_opp);
06441658
VK
527 list_del(&list_dev->node);
528 call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
529 _kfree_list_dev_rcu);
530}
531
f59d3ee8
VK
532struct device_list_opp *_add_list_dev(const struct device *dev,
533 struct device_opp *dev_opp)
06441658
VK
534{
535 struct device_list_opp *list_dev;
deaa5146 536 int ret;
06441658
VK
537
538 list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
539 if (!list_dev)
540 return NULL;
541
542 /* Initialize list-dev */
543 list_dev->dev = dev;
544 list_add_rcu(&list_dev->node, &dev_opp->dev_list);
545
deaa5146
VK
546 /* Create debugfs entries for the dev_opp */
547 ret = opp_debug_register(list_dev, dev_opp);
548 if (ret)
549 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
550 __func__, ret);
551
06441658
VK
552 return list_dev;
553}
554
984f16c8 555/**
aa5f2f85 556 * _add_device_opp() - Find device OPP table or allocate a new one
984f16c8
NM
557 * @dev: device for which we do this operation
558 *
aa5f2f85
VK
559 * It tries to find an existing table first, if it couldn't find one, it
560 * allocates a new OPP table and returns that.
984f16c8
NM
561 *
562 * Return: valid device_opp pointer if success, else NULL.
563 */
327854c8 564static struct device_opp *_add_device_opp(struct device *dev)
07cce74a
VK
565{
566 struct device_opp *dev_opp;
06441658 567 struct device_list_opp *list_dev;
07cce74a 568
aa5f2f85
VK
569 /* Check for existing list for 'dev' first */
570 dev_opp = _find_device_opp(dev);
571 if (!IS_ERR(dev_opp))
572 return dev_opp;
07cce74a
VK
573
574 /*
575 * Allocate a new device OPP table. In the infrequent case where a new
576 * device is needed to be added, we pay this penalty.
577 */
578 dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
579 if (!dev_opp)
580 return NULL;
581
06441658
VK
582 INIT_LIST_HEAD(&dev_opp->dev_list);
583
584 list_dev = _add_list_dev(dev, dev_opp);
585 if (!list_dev) {
586 kfree(dev_opp);
587 return NULL;
588 }
589
07cce74a
VK
590 srcu_init_notifier_head(&dev_opp->srcu_head);
591 INIT_LIST_HEAD(&dev_opp->opp_list);
592
593 /* Secure the device list modification */
594 list_add_rcu(&dev_opp->node, &dev_opp_list);
595 return dev_opp;
596}
597
984f16c8 598/**
737002b5
VK
599 * _kfree_device_rcu() - Free device_opp RCU handler
600 * @head: RCU head
984f16c8 601 */
737002b5 602static void _kfree_device_rcu(struct rcu_head *head)
e1f60b29 603{
737002b5 604 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
6ce4184d 605
737002b5 606 kfree_rcu(device_opp, rcu_head);
e1f60b29 607}
38393409
VK
608
609/**
610 * _remove_device_opp() - Removes a device OPP table
611 * @dev_opp: device OPP table to be removed.
38393409 612 *
613 * Removes/frees device OPP table if it doesn't contain any OPPs.
38393409 614 */
3bac42ca 615static void _remove_device_opp(struct device_opp *dev_opp)
38393409 616{
06441658
VK
617 struct device_list_opp *list_dev;
618
3bac42ca
VK
619 if (!list_empty(&dev_opp->opp_list))
620 return;
621
7de36b0a
VK
622 if (dev_opp->supported_hw)
623 return;
624
01fb4d3c
VK
625 if (dev_opp->prop_name)
626 return;
627
9f8ea969
VK
628 if (!IS_ERR_OR_NULL(dev_opp->regulator))
629 return;
630
06441658
VK
631 list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
632 node);
633
634 _remove_list_dev(list_dev, dev_opp);
635
636 /* dev_list must be empty now */
637 WARN_ON(!list_empty(&dev_opp->dev_list));
638
3bac42ca
VK
639 list_del_rcu(&dev_opp->node);
640 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
641 _kfree_device_rcu);
38393409 642}
e1f60b29 643
984f16c8
NM
644/**
645 * _kfree_opp_rcu() - Free OPP RCU handler
646 * @head: RCU head
647 */
327854c8 648static void _kfree_opp_rcu(struct rcu_head *head)
129eec55
VK
649{
650 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
651
652 kfree_rcu(opp, rcu_head);
653}
654
984f16c8
NM
655/**
656 * _opp_remove() - Remove an OPP from a table definition
657 * @dev_opp: points back to the device_opp struct this opp belongs to
658 * @opp: pointer to the OPP to remove
23dacf6d 659 * @notify: OPP_EVENT_REMOVE notification should be sent or not
984f16c8
NM
660 *
661 * This function removes an opp definition from the opp list.
662 *
663 * Locking: The internal device_opp and opp structures are RCU protected.
664 * It is assumed that the caller holds required mutex for an RCU updater
665 * strategy.
666 */
327854c8 667static void _opp_remove(struct device_opp *dev_opp,
23dacf6d 668 struct dev_pm_opp *opp, bool notify)
129eec55
VK
669{
670 /*
671 * Notify the changes in the availability of the operable
672 * frequency/voltage list.
673 */
23dacf6d
VK
674 if (notify)
675 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
deaa5146 676 opp_debug_remove_one(opp);
129eec55 677 list_del_rcu(&opp->node);
327854c8 678 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
129eec55 679
3bac42ca 680 _remove_device_opp(dev_opp);
129eec55
VK
681}
682
683/**
684 * dev_pm_opp_remove() - Remove an OPP from OPP list
685 * @dev: device for which we do this operation
686 * @freq: OPP to remove with matching 'freq'
687 *
688 * This function removes an opp from the opp list.
984f16c8
NM
689 *
690 * Locking: The internal device_opp and opp structures are RCU protected.
691 * Hence this function internally uses RCU updater strategy with mutex locks
692 * to keep the integrity of the internal data structures. Callers should ensure
693 * that this function is *NOT* called under RCU protection or in contexts where
694 * mutex cannot be locked.
129eec55
VK
695 */
696void dev_pm_opp_remove(struct device *dev, unsigned long freq)
697{
698 struct dev_pm_opp *opp;
699 struct device_opp *dev_opp;
700 bool found = false;
701
702 /* Hold our list modification lock here */
703 mutex_lock(&dev_opp_list_lock);
704
327854c8 705 dev_opp = _find_device_opp(dev);
129eec55
VK
706 if (IS_ERR(dev_opp))
707 goto unlock;
708
709 list_for_each_entry(opp, &dev_opp->opp_list, node) {
710 if (opp->rate == freq) {
711 found = true;
712 break;
713 }
714 }
715
716 if (!found) {
717 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
718 __func__, freq);
719 goto unlock;
720 }
721
23dacf6d 722 _opp_remove(dev_opp, opp, true);
129eec55
VK
723unlock:
724 mutex_unlock(&dev_opp_list_lock);
725}
726EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
727
23dacf6d
VK
728static struct dev_pm_opp *_allocate_opp(struct device *dev,
729 struct device_opp **dev_opp)
e1f60b29 730{
23dacf6d 731 struct dev_pm_opp *opp;
e1f60b29 732
23dacf6d
VK
733 /* allocate new OPP node */
734 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
735 if (!opp)
736 return NULL;
e1f60b29 737
23dacf6d 738 INIT_LIST_HEAD(&opp->node);
e1f60b29 739
23dacf6d
VK
740 *dev_opp = _add_device_opp(dev);
741 if (!*dev_opp) {
742 kfree(opp);
743 return NULL;
744 }
745
746 return opp;
747}
748
7d34d56e
VK
749static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
750 struct device_opp *dev_opp)
751{
752 struct regulator *reg = dev_opp->regulator;
753
754 if (!IS_ERR(reg) &&
755 !regulator_is_supported_voltage(reg, opp->u_volt_min,
756 opp->u_volt_max)) {
757 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
758 __func__, opp->u_volt_min, opp->u_volt_max);
759 return false;
760 }
761
762 return true;
763}
764
06441658
VK
765static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
766 struct device_opp *dev_opp)
23dacf6d
VK
767{
768 struct dev_pm_opp *opp;
769 struct list_head *head = &dev_opp->opp_list;
deaa5146 770 int ret;
23dacf6d
VK
771
772 /*
773 * Insert new OPP in order of increasing frequency and discard if
774 * already present.
775 *
776 * Need to use &dev_opp->opp_list in the condition part of the 'for'
777 * loop, don't replace it with head otherwise it will become an infinite
778 * loop.
779 */
780 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
781 if (new_opp->rate > opp->rate) {
782 head = &opp->node;
783 continue;
784 }
785
786 if (new_opp->rate < opp->rate)
787 break;
788
789 /* Duplicate OPPs */
06441658 790 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
23dacf6d
VK
791 __func__, opp->rate, opp->u_volt, opp->available,
792 new_opp->rate, new_opp->u_volt, new_opp->available);
793
794 return opp->available && new_opp->u_volt == opp->u_volt ?
795 0 : -EEXIST;
796 }
797
798 new_opp->dev_opp = dev_opp;
799 list_add_rcu(&new_opp->node, head);
800
deaa5146
VK
801 ret = opp_debug_create_one(new_opp, dev_opp);
802 if (ret)
803 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
804 __func__, ret);
805
7d34d56e
VK
806 if (!_opp_supported_by_regulators(new_opp, dev_opp)) {
807 new_opp->available = false;
808 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
809 __func__, new_opp->rate);
810 }
811
23dacf6d
VK
812 return 0;
813}
814
984f16c8 815/**
b64b9c3f 816 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
984f16c8
NM
817 * @dev: device for which we do this operation
818 * @freq: Frequency in Hz for this OPP
819 * @u_volt: Voltage in uVolts for this OPP
820 * @dynamic: Dynamically added OPPs.
821 *
822 * This function adds an opp definition to the opp list and returns status.
823 * The opp is made available by default and it can be controlled using
824 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
825 *
8f8d37b2
VK
826 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
827 * and freed by dev_pm_opp_of_remove_table.
984f16c8
NM
828 *
829 * Locking: The internal device_opp and opp structures are RCU protected.
830 * Hence this function internally uses RCU updater strategy with mutex locks
831 * to keep the integrity of the internal data structures. Callers should ensure
832 * that this function is *NOT* called under RCU protection or in contexts where
833 * mutex cannot be locked.
834 *
835 * Return:
836 * 0 On success OR
837 * Duplicate OPPs (both freq and volt are same) and opp->available
838 * -EEXIST Freq are same and volt are different OR
839 * Duplicate OPPs (both freq and volt are same) and !opp->available
840 * -ENOMEM Memory allocation failure
841 */
b64b9c3f
VK
842static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
843 bool dynamic)
e1f60b29 844{
aa5f2f85 845 struct device_opp *dev_opp;
23dacf6d 846 struct dev_pm_opp *new_opp;
6ce4184d 847 int ret;
e1f60b29 848
e1f60b29
NM
849 /* Hold our list modification lock here */
850 mutex_lock(&dev_opp_list_lock);
851
23dacf6d
VK
852 new_opp = _allocate_opp(dev, &dev_opp);
853 if (!new_opp) {
854 ret = -ENOMEM;
855 goto unlock;
856 }
857
a7470db6 858 /* populate the opp table */
a7470db6
VK
859 new_opp->rate = freq;
860 new_opp->u_volt = u_volt;
861 new_opp->available = true;
23dacf6d 862 new_opp->dynamic = dynamic;
a7470db6 863
06441658 864 ret = _opp_add(dev, new_opp, dev_opp);
23dacf6d 865 if (ret)
6ce4184d 866 goto free_opp;
64ce8545 867
e1f60b29
NM
868 mutex_unlock(&dev_opp_list_lock);
869
03ca370f
MH
870 /*
871 * Notify the changes in the availability of the operable
872 * frequency/voltage list.
873 */
cd1a068a 874 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
e1f60b29 875 return 0;
6ce4184d
VK
876
877free_opp:
23dacf6d
VK
878 _opp_remove(dev_opp, new_opp, false);
879unlock:
6ce4184d 880 mutex_unlock(&dev_opp_list_lock);
6ce4184d 881 return ret;
e1f60b29 882}
38393409 883
27465902 884/* TODO: Support multiple regulators */
01fb4d3c
VK
885static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
886 struct device_opp *dev_opp)
27465902
VK
887{
888 u32 microvolt[3] = {0};
ad623c31 889 u32 val;
27465902 890 int count, ret;
01fb4d3c
VK
891 struct property *prop = NULL;
892 char name[NAME_MAX];
893
894 /* Search for "opp-microvolt-<name>" */
895 if (dev_opp->prop_name) {
5ff24d60
VK
896 snprintf(name, sizeof(name), "opp-microvolt-%s",
897 dev_opp->prop_name);
01fb4d3c
VK
898 prop = of_find_property(opp->np, name, NULL);
899 }
900
901 if (!prop) {
902 /* Search for "opp-microvolt" */
fd8d8e63 903 sprintf(name, "opp-microvolt");
01fb4d3c 904 prop = of_find_property(opp->np, name, NULL);
27465902 905
01fb4d3c
VK
906 /* Missing property isn't a problem, but an invalid entry is */
907 if (!prop)
908 return 0;
909 }
27465902 910
01fb4d3c 911 count = of_property_count_u32_elems(opp->np, name);
680168a5 912 if (count < 0) {
01fb4d3c
VK
913 dev_err(dev, "%s: Invalid %s property (%d)\n",
914 __func__, name, count);
680168a5
VK
915 return count;
916 }
917
27465902
VK
918 /* There can be one or three elements here */
919 if (count != 1 && count != 3) {
01fb4d3c
VK
920 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
921 __func__, name, count);
27465902
VK
922 return -EINVAL;
923 }
924
01fb4d3c 925 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
27465902 926 if (ret) {
01fb4d3c 927 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
27465902
VK
928 return -EINVAL;
929 }
930
931 opp->u_volt = microvolt[0];
932 opp->u_volt_min = microvolt[1];
933 opp->u_volt_max = microvolt[2];
934
01fb4d3c
VK
935 /* Search for "opp-microamp-<name>" */
936 prop = NULL;
937 if (dev_opp->prop_name) {
5ff24d60
VK
938 snprintf(name, sizeof(name), "opp-microamp-%s",
939 dev_opp->prop_name);
01fb4d3c
VK
940 prop = of_find_property(opp->np, name, NULL);
941 }
942
943 if (!prop) {
944 /* Search for "opp-microamp" */
fd8d8e63 945 sprintf(name, "opp-microamp");
01fb4d3c
VK
946 prop = of_find_property(opp->np, name, NULL);
947 }
948
949 if (prop && !of_property_read_u32(opp->np, name, &val))
ad623c31
VK
950 opp->u_amp = val;
951
27465902
VK
952 return 0;
953}
954
7de36b0a
VK
955/**
956 * dev_pm_opp_set_supported_hw() - Set supported platforms
957 * @dev: Device for which supported-hw has to be set.
958 * @versions: Array of hierarchy of versions to match.
959 * @count: Number of elements in the array.
960 *
961 * This is required only for the V2 bindings, and it enables a platform to
962 * specify the hierarchy of versions it supports. OPP layer will then enable
963 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
964 * property.
965 *
966 * Locking: The internal device_opp and opp structures are RCU protected.
967 * Hence this function internally uses RCU updater strategy with mutex locks
968 * to keep the integrity of the internal data structures. Callers should ensure
969 * that this function is *NOT* called under RCU protection or in contexts where
970 * mutex cannot be locked.
971 */
972int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
973 unsigned int count)
974{
975 struct device_opp *dev_opp;
976 int ret = 0;
977
978 /* Hold our list modification lock here */
979 mutex_lock(&dev_opp_list_lock);
980
981 dev_opp = _add_device_opp(dev);
982 if (!dev_opp) {
983 ret = -ENOMEM;
984 goto unlock;
985 }
986
987 /* Make sure there are no concurrent readers while updating dev_opp */
988 WARN_ON(!list_empty(&dev_opp->opp_list));
989
990 /* Do we already have a version hierarchy associated with dev_opp? */
991 if (dev_opp->supported_hw) {
992 dev_err(dev, "%s: Already have supported hardware list\n",
993 __func__);
994 ret = -EBUSY;
995 goto err;
996 }
997
998 dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
999 GFP_KERNEL);
1000 if (!dev_opp->supported_hw) {
1001 ret = -ENOMEM;
1002 goto err;
1003 }
1004
1005 dev_opp->supported_hw_count = count;
1006 mutex_unlock(&dev_opp_list_lock);
1007 return 0;
1008
1009err:
1010 _remove_device_opp(dev_opp);
1011unlock:
1012 mutex_unlock(&dev_opp_list_lock);
1013
1014 return ret;
1015}
1016EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
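
/*
 * Illustrative usage (assumption, not part of this file): a platform driver
 * that knows its silicon revision could publish it before parsing the OPP
 * table, so that only matching "opp-supported-hw" entries get enabled:
 *
 *	static const u32 hw_versions[] = { 0x1, 0x4 };
 *
 *	ret = dev_pm_opp_set_supported_hw(dev, hw_versions,
 *					  ARRAY_SIZE(hw_versions));
 *	if (ret)
 *		return ret;
 *	ret = dev_pm_opp_of_add_table(dev);
 *
 * The values and their ordering are platform specific; each one is matched
 * bitwise against the corresponding cell of an OPP node's "opp-supported-hw"
 * property (see _opp_is_supported() below).
 */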
1017
1018/**
1019 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1020 * @dev: Device for which supported-hw has to be set.
1021 *
1022 * This is required only for the V2 bindings, and is called for a matching
1023 * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
1024 * will not be freed.
1025 *
1026 * Locking: The internal device_opp and opp structures are RCU protected.
1027 * Hence this function internally uses RCU updater strategy with mutex locks
1028 * to keep the integrity of the internal data structures. Callers should ensure
1029 * that this function is *NOT* called under RCU protection or in contexts where
1030 * mutex cannot be locked.
1031 */
1032void dev_pm_opp_put_supported_hw(struct device *dev)
1033{
1034 struct device_opp *dev_opp;
1035
1036 /* Hold our list modification lock here */
1037 mutex_lock(&dev_opp_list_lock);
1038
1039 /* Check for existing list for 'dev' first */
1040 dev_opp = _find_device_opp(dev);
1041 if (IS_ERR(dev_opp)) {
1042 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1043 goto unlock;
1044 }
1045
1046 /* Make sure there are no concurrent readers while updating dev_opp */
1047 WARN_ON(!list_empty(&dev_opp->opp_list));
1048
1049 if (!dev_opp->supported_hw) {
1050 dev_err(dev, "%s: Doesn't have supported hardware list\n",
1051 __func__);
1052 goto unlock;
1053 }
1054
1055 kfree(dev_opp->supported_hw);
1056 dev_opp->supported_hw = NULL;
1057 dev_opp->supported_hw_count = 0;
1058
1059 /* Try freeing device_opp if this was the last blocking resource */
1060 _remove_device_opp(dev_opp);
1061
1062unlock:
1063 mutex_unlock(&dev_opp_list_lock);
1064}
1065EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1066
1067/**
1068 * dev_pm_opp_set_prop_name() - Set prop-extn name
1069 * @dev: Device for which the property-name extension has to be set.
1070 * @name: name to postfix to properties.
1071 *
1072 * This is required only for the V2 bindings, and it enables a platform to
1073 * specify the extn to be used for certain property names. The properties to
1074 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1075 * should postfix the property name with -<name> while looking for them.
1076 *
1077 * Locking: The internal device_opp and opp structures are RCU protected.
1078 * Hence this function internally uses RCU updater strategy with mutex locks
1079 * to keep the integrity of the internal data structures. Callers should ensure
1080 * that this function is *NOT* called under RCU protection or in contexts where
1081 * mutex cannot be locked.
1082 */
1083int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1084{
1085 struct device_opp *dev_opp;
1086 int ret = 0;
1087
1088 /* Hold our list modification lock here */
1089 mutex_lock(&dev_opp_list_lock);
1090
1091 dev_opp = _add_device_opp(dev);
1092 if (!dev_opp) {
1093 ret = -ENOMEM;
1094 goto unlock;
1095 }
1096
1097 /* Make sure there are no concurrent readers while updating dev_opp */
1098 WARN_ON(!list_empty(&dev_opp->opp_list));
1099
1100 /* Do we already have a prop-name associated with dev_opp? */
1101 if (dev_opp->prop_name) {
1102 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1103 dev_opp->prop_name);
1104 ret = -EBUSY;
1105 goto err;
1106 }
1107
1108 dev_opp->prop_name = kstrdup(name, GFP_KERNEL);
1109 if (!dev_opp->prop_name) {
1110 ret = -ENOMEM;
1111 goto err;
1112 }
1113
1114 mutex_unlock(&dev_opp_list_lock);
1115 return 0;
1116
1117err:
1118 _remove_device_opp(dev_opp);
1119unlock:
1120 mutex_unlock(&dev_opp_list_lock);
1121
1122 return ret;
1123}
1124EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1125
1126/**
1127 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1128 * @dev: Device for which the property-name extension was set.
1129 *
1130 * This is required only for the V2 bindings, and is called for a matching
1131 * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure
1132 * will not be freed.
1133 *
1134 * Locking: The internal device_opp and opp structures are RCU protected.
1135 * Hence this function internally uses RCU updater strategy with mutex locks
1136 * to keep the integrity of the internal data structures. Callers should ensure
1137 * that this function is *NOT* called under RCU protection or in contexts where
1138 * mutex cannot be locked.
1139 */
1140void dev_pm_opp_put_prop_name(struct device *dev)
1141{
1142 struct device_opp *dev_opp;
1143
1144 /* Hold our list modification lock here */
1145 mutex_lock(&dev_opp_list_lock);
1146
1147 /* Check for existing list for 'dev' first */
1148 dev_opp = _find_device_opp(dev);
1149 if (IS_ERR(dev_opp)) {
1150 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1151 goto unlock;
1152 }
1153
1154 /* Make sure there are no concurrent readers while updating dev_opp */
1155 WARN_ON(!list_empty(&dev_opp->opp_list));
1156
1157 if (!dev_opp->prop_name) {
1158 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1159 goto unlock;
1160 }
1161
1162 kfree(dev_opp->prop_name);
1163 dev_opp->prop_name = NULL;
1164
1165 /* Try freeing device_opp if this was the last blocking resource */
1166 _remove_device_opp(dev_opp);
1167
1168unlock:
1169 mutex_unlock(&dev_opp_list_lock);
1170}
1171EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1172
9f8ea969
VK
1173/**
1174 * dev_pm_opp_set_regulator() - Set regulator name for the device
1175 * @dev: Device for which regulator name is being set.
1176 * @name: Name of the regulator.
1177 *
1178 * In order to support OPP switching, OPP layer needs to know the name of the
1179 * device's regulator, as the core would be required to switch voltages as well.
1180 *
1181 * This must be called before any OPPs are initialized for the device.
1182 *
1183 * Locking: The internal device_opp and opp structures are RCU protected.
1184 * Hence this function internally uses RCU updater strategy with mutex locks
1185 * to keep the integrity of the internal data structures. Callers should ensure
1186 * that this function is *NOT* called under RCU protection or in contexts where
1187 * mutex cannot be locked.
1188 */
1189int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1190{
1191 struct device_opp *dev_opp;
1192 struct regulator *reg;
1193 int ret;
1194
1195 mutex_lock(&dev_opp_list_lock);
1196
1197 dev_opp = _add_device_opp(dev);
1198 if (!dev_opp) {
1199 ret = -ENOMEM;
1200 goto unlock;
1201 }
1202
1203 /* This should be called before OPPs are initialized */
1204 if (WARN_ON(!list_empty(&dev_opp->opp_list))) {
1205 ret = -EBUSY;
1206 goto err;
1207 }
1208
1209 /* Already have a regulator set */
1210 if (WARN_ON(!IS_ERR_OR_NULL(dev_opp->regulator))) {
1211 ret = -EBUSY;
1212 goto err;
1213 }
1214 /* Allocate the regulator */
1215 reg = regulator_get_optional(dev, name);
1216 if (IS_ERR(reg)) {
1217 ret = PTR_ERR(reg);
1218 if (ret != -EPROBE_DEFER)
1219 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1220 __func__, name, ret);
1221 goto err;
1222 }
1223
1224 dev_opp->regulator = reg;
1225
1226 mutex_unlock(&dev_opp_list_lock);
1227 return 0;
1228
1229err:
1230 _remove_device_opp(dev_opp);
1231unlock:
1232 mutex_unlock(&dev_opp_list_lock);
1233
1234 return ret;
1235}
1236EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
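
/*
 * Illustrative ordering (assumption, not mandated beyond the checks enforced
 * above): since dev_pm_opp_set_regulator() must run before any OPPs exist for
 * the device, a probe path would typically look like:
 *
 *	ret = dev_pm_opp_set_regulator(dev, "cpu");
 *	if (ret)
 *		return ret;
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_pm_opp_put_regulator(dev);
 *
 * The regulator name ("cpu" here) is a placeholder; it must match a supply
 * of the device, as regulator_get_optional() is used for the lookup.
 */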
1237
1238/**
1239 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1240 * @dev: Device for which regulator was set.
1241 *
1242 * Locking: The internal device_opp and opp structures are RCU protected.
1243 * Hence this function internally uses RCU updater strategy with mutex locks
1244 * to keep the integrity of the internal data structures. Callers should ensure
1245 * that this function is *NOT* called under RCU protection or in contexts where
1246 * mutex cannot be locked.
1247 */
1248void dev_pm_opp_put_regulator(struct device *dev)
1249{
1250 struct device_opp *dev_opp;
1251
1252 mutex_lock(&dev_opp_list_lock);
1253
1254 /* Check for existing list for 'dev' first */
1255 dev_opp = _find_device_opp(dev);
1256 if (IS_ERR(dev_opp)) {
1257 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1258 goto unlock;
1259 }
1260
1261 if (IS_ERR_OR_NULL(dev_opp->regulator)) {
1262 dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1263 goto unlock;
1264 }
1265
1266 /* Make sure there are no concurrent readers while updating dev_opp */
1267 WARN_ON(!list_empty(&dev_opp->opp_list));
1268
1269 regulator_put(dev_opp->regulator);
1270 dev_opp->regulator = ERR_PTR(-EINVAL);
1271
1272 /* Try freeing device_opp if this was the last blocking resource */
1273 _remove_device_opp(dev_opp);
1274
1275unlock:
1276 mutex_unlock(&dev_opp_list_lock);
1277}
1278EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1279
7de36b0a
VK
1280static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
1281 struct device_node *np)
1282{
1283 unsigned int count = dev_opp->supported_hw_count;
1284 u32 version;
1285 int ret;
1286
1287 if (!dev_opp->supported_hw)
1288 return true;
1289
1290 while (count--) {
1291 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1292 &version);
1293 if (ret) {
1294 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1295 __func__, count, ret);
1296 return false;
1297 }
1298
1299 /* Both of these are bitwise masks of the versions */
1300 if (!(version & dev_opp->supported_hw[count]))
1301 return false;
1302 }
1303
1304 return true;
1305}
1306
27465902
VK
1307/**
1308 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1309 * @dev: device for which we do this operation
1310 * @np: device node
1311 *
1312 * This function adds an opp definition to the opp list and returns status. The
1313 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1314 * removed by dev_pm_opp_remove.
1315 *
1316 * Locking: The internal device_opp and opp structures are RCU protected.
1317 * Hence this function internally uses RCU updater strategy with mutex locks
1318 * to keep the integrity of the internal data structures. Callers should ensure
1319 * that this function is *NOT* called under RCU protection or in contexts where
1320 * mutex cannot be locked.
1321 *
1322 * Return:
1323 * 0 On success OR
1324 * Duplicate OPPs (both freq and volt are same) and opp->available
1325 * -EEXIST Freq are same and volt are different OR
1326 * Duplicate OPPs (both freq and volt are same) and !opp->available
1327 * -ENOMEM Memory allocation failure
1328 * -EINVAL Failed parsing the OPP node
1329 */
1330static int _opp_add_static_v2(struct device *dev, struct device_node *np)
1331{
1332 struct device_opp *dev_opp;
1333 struct dev_pm_opp *new_opp;
1334 u64 rate;
68fa9f0a 1335 u32 val;
27465902
VK
1336 int ret;
1337
1338 /* Hold our list modification lock here */
1339 mutex_lock(&dev_opp_list_lock);
1340
1341 new_opp = _allocate_opp(dev, &dev_opp);
1342 if (!new_opp) {
1343 ret = -ENOMEM;
1344 goto unlock;
1345 }
1346
1347 ret = of_property_read_u64(np, "opp-hz", &rate);
1348 if (ret < 0) {
1349 dev_err(dev, "%s: opp-hz not found\n", __func__);
1350 goto free_opp;
1351 }
1352
7de36b0a
VK
1353 /* Check if the OPP supports hardware's hierarchy of versions or not */
1354 if (!_opp_is_supported(dev, dev_opp, np)) {
1355 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
1356 goto free_opp;
1357 }
1358
27465902
VK
1359 /*
1360 * Rate is defined as an unsigned long in clk API, and so casting
1361 * explicitly to its type. Must be fixed once rate is 64 bit
1362 * guaranteed in clk API.
1363 */
1364 new_opp->rate = (unsigned long)rate;
1365 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
1366
1367 new_opp->np = np;
1368 new_opp->dynamic = false;
1369 new_opp->available = true;
68fa9f0a
VK
1370
1371 if (!of_property_read_u32(np, "clock-latency-ns", &val))
1372 new_opp->clock_latency_ns = val;
27465902 1373
01fb4d3c 1374 ret = opp_parse_supplies(new_opp, dev, dev_opp);
27465902
VK
1375 if (ret)
1376 goto free_opp;
1377
06441658 1378 ret = _opp_add(dev, new_opp, dev_opp);
27465902
VK
1379 if (ret)
1380 goto free_opp;
1381
ad656a6a
VK
1382 /* OPP to select on device suspend */
1383 if (of_property_read_bool(np, "opp-suspend")) {
deaa5146 1384 if (dev_opp->suspend_opp) {
ad656a6a
VK
1385 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
1386 __func__, dev_opp->suspend_opp->rate,
1387 new_opp->rate);
deaa5146
VK
1388 } else {
1389 new_opp->suspend = true;
ad656a6a 1390 dev_opp->suspend_opp = new_opp;
deaa5146 1391 }
ad656a6a
VK
1392 }
1393
3ca9bb33
VK
1394 if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
1395 dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
1396
27465902
VK
1397 mutex_unlock(&dev_opp_list_lock);
1398
3ca9bb33 1399 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
27465902 1400 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
3ca9bb33
VK
1401 new_opp->u_volt_min, new_opp->u_volt_max,
1402 new_opp->clock_latency_ns);
27465902
VK
1403
1404 /*
1405 * Notify the changes in the availability of the operable
1406 * frequency/voltage list.
1407 */
1408 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
1409 return 0;
1410
1411free_opp:
1412 _opp_remove(dev_opp, new_opp, false);
1413unlock:
1414 mutex_unlock(&dev_opp_list_lock);
1415 return ret;
1416}
1417
1418/**
1419 * dev_pm_opp_add() - Add an OPP table from a table definition
1420 * @dev: device for which we do this operation
1421 * @freq: Frequency in Hz for this OPP
1422 * @u_volt: Voltage in uVolts for this OPP
1423 *
1424 * This function adds an opp definition to the opp list and returns status.
1425 * The opp is made available by default and it can be controlled using
1426 * dev_pm_opp_enable/disable functions.
1427 *
1428 * Locking: The internal device_opp and opp structures are RCU protected.
1429 * Hence this function internally uses RCU updater strategy with mutex locks
1430 * to keep the integrity of the internal data structures. Callers should ensure
1431 * that this function is *NOT* called under RCU protection or in contexts where
1432 * mutex cannot be locked.
1433 *
1434 * Return:
984f16c8 1435 * 0 On success OR
38393409 1436 * Duplicate OPPs (both freq and volt are same) and opp->available
984f16c8 1437 * -EEXIST Freq are same and volt are different OR
38393409 1438 * Duplicate OPPs (both freq and volt are same) and !opp->available
984f16c8 1439 * -ENOMEM Memory allocation failure
38393409
VK
1440 */
1441int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1442{
b64b9c3f 1443 return _opp_add_v1(dev, freq, u_volt, true);
38393409 1444}
5d4879cd 1445EXPORT_SYMBOL_GPL(dev_pm_opp_add);
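
/*
 * Illustrative usage (assumption): a platform without device tree support can
 * still register OPPs directly from board or driver code, e.g.:
 *
 *	dev_pm_opp_add(dev, 1000000000, 1100000);	// 1 GHz at 1.1 V
 *	dev_pm_opp_add(dev,  800000000, 1025000);	// 800 MHz at 1.025 V
 *
 * Frequencies are in Hz and voltages in microvolts; a duplicate frequency
 * with a different voltage is rejected with -EEXIST, as documented above.
 */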
1446
1447/**
327854c8 1448 * _opp_set_availability() - helper to set the availability of an opp
e1f60b29
NM
1449 * @dev: device for which we do this operation
1450 * @freq: OPP frequency to modify availability
1451 * @availability_req: availability status requested for this opp
1452 *
1453 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1454 * share a common logic which is isolated here.
1455 *
984f16c8 1456 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1457 * copy operation, returns 0 if no modification was done OR modification was
e1f60b29
NM
1458 * successful.
1459 *
1460 * Locking: The internal device_opp and opp structures are RCU protected.
1461 * Hence this function internally uses RCU updater strategy with mutex locks to
1462 * keep the integrity of the internal data structures. Callers should ensure
1463 * that this function is *NOT* called under RCU protection or in contexts where
1464 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1465 */
327854c8
NM
1466static int _opp_set_availability(struct device *dev, unsigned long freq,
1467 bool availability_req)
e1f60b29 1468{
29df0ee1 1469 struct device_opp *dev_opp;
47d43ba7 1470 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
e1f60b29
NM
1471 int r = 0;
1472
1473 /* keep the node allocated */
47d43ba7 1474 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
59d84ca8 1475 if (!new_opp)
e1f60b29 1476 return -ENOMEM;
e1f60b29
NM
1477
1478 mutex_lock(&dev_opp_list_lock);
1479
1480 /* Find the device_opp */
327854c8 1481 dev_opp = _find_device_opp(dev);
e1f60b29
NM
1482 if (IS_ERR(dev_opp)) {
1483 r = PTR_ERR(dev_opp);
1484 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1485 goto unlock;
1486 }
1487
1488 /* Do we have the frequency? */
1489 list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
1490 if (tmp_opp->rate == freq) {
1491 opp = tmp_opp;
1492 break;
1493 }
1494 }
1495 if (IS_ERR(opp)) {
1496 r = PTR_ERR(opp);
1497 goto unlock;
1498 }
1499
1500 /* Is update really needed? */
1501 if (opp->available == availability_req)
1502 goto unlock;
1503 /* copy the old data over */
1504 *new_opp = *opp;
1505
1506 /* plug in new node */
1507 new_opp->available = availability_req;
1508
1509 list_replace_rcu(&opp->node, &new_opp->node);
1510 mutex_unlock(&dev_opp_list_lock);
327854c8 1511 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
e1f60b29 1512
03ca370f
MH
1513 /* Notify the change of the OPP availability */
1514 if (availability_req)
cd1a068a 1515 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
03ca370f
MH
1516 new_opp);
1517 else
cd1a068a 1518 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
03ca370f
MH
1519 new_opp);
1520
dde8437d 1521 return 0;
e1f60b29
NM
1522
1523unlock:
1524 mutex_unlock(&dev_opp_list_lock);
e1f60b29
NM
1525 kfree(new_opp);
1526 return r;
1527}
1528
1529/**
5d4879cd 1530 * dev_pm_opp_enable() - Enable a specific OPP
1531 * @dev: device for which we do this operation
1532 * @freq: OPP frequency to enable
1533 *
1534 * Enables a provided opp. If the operation is valid, this returns 0, else the
1535 * corresponding error value. It is meant to be used by users to make an OPP
1536 * available again after it was temporarily made unavailable with dev_pm_opp_disable.
1537 *
1538 * Locking: The internal device_opp and opp structures are RCU protected.
1539 * Hence this function indirectly uses RCU and mutex locks to keep the
1540 * integrity of the internal data structures. Callers should ensure that
1541 * this function is *NOT* called under RCU protection or in contexts where
1542 * mutex locking or synchronize_rcu() blocking calls cannot be used.
984f16c8
NM
1543 *
1544 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1545 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 1546 * successful.
e1f60b29 1547 */
5d4879cd 1548int dev_pm_opp_enable(struct device *dev, unsigned long freq)
e1f60b29 1549{
327854c8 1550 return _opp_set_availability(dev, freq, true);
e1f60b29 1551}
5d4879cd 1552EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
e1f60b29
NM
1553
1554/**
5d4879cd 1555 * dev_pm_opp_disable() - Disable a specific OPP
e1f60b29
NM
1556 * @dev: device for which we do this operation
1557 * @freq: OPP frequency to disable
1558 *
1559 * Disables a provided opp. If the operation is valid, this returns
1560 * 0, else the corresponding error value. It is meant to be a temporary
1561 * control by users to make this OPP not available until the circumstances are
5d4879cd 1562 * right to make it available again (with a call to dev_pm_opp_enable).
e1f60b29
NM
1563 *
1564 * Locking: The internal device_opp and opp structures are RCU protected.
1565 * Hence this function indirectly uses RCU and mutex locks to keep the
1566 * integrity of the internal data structures. Callers should ensure that
1567 * this function is *NOT* called under RCU protection or in contexts where
1568 * mutex locking or synchronize_rcu() blocking calls cannot be used.
984f16c8
NM
1569 *
1570 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1571 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 1572 * successful.
e1f60b29 1573 */
5d4879cd 1574int dev_pm_opp_disable(struct device *dev, unsigned long freq)
e1f60b29 1575{
327854c8 1576 return _opp_set_availability(dev, freq, false);
e1f60b29 1577}
5d4879cd 1578EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
e1f60b29 1579
03ca370f 1580/**
5d4879cd 1581 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
03ca370f 1582 * @dev: device pointer used to lookup device OPPs.
984f16c8
NM
1583 *
1584 * Return: pointer to notifier head if found, otherwise -ENODEV or
1585 * -EINVAL based on type of error casted as pointer. value must be checked
1586 * with IS_ERR to determine valid pointer or error result.
1587 *
1588 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
1589 * protected pointer. The reason for the same is that the opp pointer which is
1590 * returned will remain valid for use with opp_get_{voltage, freq} only while
1591 * under the locked area. The pointer returned must be used prior to unlocking
1592 * with rcu_read_unlock() to maintain the integrity of the pointer.
03ca370f 1593 */
5d4879cd 1594struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
03ca370f 1595{
327854c8 1596 struct device_opp *dev_opp = _find_device_opp(dev);
03ca370f
MH
1597
1598 if (IS_ERR(dev_opp))
156acb16 1599 return ERR_CAST(dev_opp); /* matching type */
03ca370f 1600
cd1a068a 1601 return &dev_opp->srcu_head;
03ca370f 1602}
4679ec37 1603EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
b496dfbc
SG
1604
1605#ifdef CONFIG_OF
1606/**
8f8d37b2
VK
1607 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1608 * entries
b496dfbc
SG
1609 * @dev: device pointer used to lookup device OPPs.
1610 *
737002b5 1611 * Free OPPs created using static entries present in DT.
984f16c8
NM
1612 *
1613 * Locking: The internal device_opp and opp structures are RCU protected.
1614 * Hence this function indirectly uses RCU updater strategy with mutex locks
1615 * to keep the integrity of the internal data structures. Callers should ensure
1616 * that this function is *NOT* called under RCU protection or in contexts where
1617 * mutex cannot be locked.
b496dfbc 1618 */
8f8d37b2 1619void dev_pm_opp_of_remove_table(struct device *dev)
737002b5
VK
1620{
1621 struct device_opp *dev_opp;
1622 struct dev_pm_opp *opp, *tmp;
1623
06441658
VK
1624 /* Hold our list modification lock here */
1625 mutex_lock(&dev_opp_list_lock);
1626
737002b5
VK
1627 /* Check for existing list for 'dev' */
1628 dev_opp = _find_device_opp(dev);
1629 if (IS_ERR(dev_opp)) {
1630 int error = PTR_ERR(dev_opp);
1631
1632 if (error != -ENODEV)
1633 WARN(1, "%s: dev_opp: %d\n",
1634 IS_ERR_OR_NULL(dev) ?
1635 "Invalid device" : dev_name(dev),
1636 error);
06441658 1637 goto unlock;
737002b5
VK
1638 }
1639
06441658
VK
1640 /* Find if dev_opp manages a single device */
1641 if (list_is_singular(&dev_opp->dev_list)) {
1642 /* Free static OPPs */
1643 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
1644 if (!opp->dynamic)
1645 _opp_remove(dev_opp, opp, true);
1646 }
1647 } else {
1648 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
737002b5
VK
1649 }
1650
06441658 1651unlock:
737002b5
VK
1652 mutex_unlock(&dev_opp_list_lock);
1653}
8f8d37b2 1654EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
737002b5 1655
1840995c 1656/* Returns opp descriptor node for a device, caller must do of_node_put() */
f59d3ee8 1657struct device_node *_of_get_opp_desc_node(struct device *dev)
8d4d4e98 1658{
8d4d4e98
VK
1659 /*
1660 * TODO: Support for multiple OPP tables.
1661 *
1662 * There should be only ONE phandle present in "operating-points-v2"
1663 * property.
1664 */
8d4d4e98 1665
1840995c 1666 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
8d4d4e98
VK
1667}
1668
27465902 1669/* Initializes OPP tables based on new bindings */
f0489a5e 1670static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
27465902 1671{
1840995c 1672 struct device_node *np;
06441658 1673 struct device_opp *dev_opp;
27465902
VK
1674 int ret = 0, count = 0;
1675
4a3a1353
VK
1676 mutex_lock(&dev_opp_list_lock);
1677
06441658
VK
1678 dev_opp = _managed_opp(opp_np);
1679 if (dev_opp) {
1680 /* OPPs are already managed */
1681 if (!_add_list_dev(dev, dev_opp))
1682 ret = -ENOMEM;
4a3a1353 1683 mutex_unlock(&dev_opp_list_lock);
1840995c 1684 return ret;
06441658 1685 }
4a3a1353 1686 mutex_unlock(&dev_opp_list_lock);
06441658 1687
27465902
VK
1688 /* We have opp-list node now, iterate over it and add OPPs */
1689 for_each_available_child_of_node(opp_np, np) {
1690 count++;
1691
1692 ret = _opp_add_static_v2(dev, np);
1693 if (ret) {
1694 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1695 ret);
1f821ed7 1696 goto free_table;
1697 }
1698 }
1699
1700 /* There should be one or more OPPs defined */
1840995c
VK
1701 if (WARN_ON(!count))
1702 return -ENOENT;
27465902 1703
4a3a1353
VK
1704 mutex_lock(&dev_opp_list_lock);
1705
1f821ed7
VK
1706 dev_opp = _find_device_opp(dev);
1707 if (WARN_ON(IS_ERR(dev_opp))) {
1708 ret = PTR_ERR(dev_opp);
4a3a1353 1709 mutex_unlock(&dev_opp_list_lock);
1f821ed7 1710 goto free_table;
06441658 1711 }
27465902 1712
1f821ed7
VK
1713 dev_opp->np = opp_np;
1714 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1715
4a3a1353
VK
1716 mutex_unlock(&dev_opp_list_lock);
1717
1f821ed7
VK
1718 return 0;
1719
1720free_table:
8f8d37b2 1721 dev_pm_opp_of_remove_table(dev);
27465902
VK
1722
1723 return ret;
1724}
1725
1726/* Initializes OPP tables based on old-deprecated bindings */
f0489a5e 1727static int _of_add_opp_table_v1(struct device *dev)
b496dfbc
SG
1728{
1729 const struct property *prop;
1730 const __be32 *val;
1731 int nr;
1732
1733 prop = of_find_property(dev->of_node, "operating-points", NULL);
1734 if (!prop)
1735 return -ENODEV;
1736 if (!prop->value)
1737 return -ENODATA;
1738
1739 /*
1740 * Each OPP is a set of tuples consisting of frequency and
1741 * voltage like <freq-kHz vol-uV>.
1742 */
1743 nr = prop->length / sizeof(u32);
1744 if (nr % 2) {
1745 dev_err(dev, "%s: Invalid OPP list\n", __func__);
1746 return -EINVAL;
1747 }
1748
1749 val = prop->value;
1750 while (nr) {
1751 unsigned long freq = be32_to_cpup(val++) * 1000;
1752 unsigned long volt = be32_to_cpup(val++);
1753
b64b9c3f 1754 if (_opp_add_v1(dev, freq, volt, false))
b496dfbc
SG
1755 dev_warn(dev, "%s: Failed to add OPP %ld\n",
1756 __func__, freq);
b496dfbc
SG
1757 nr -= 2;
1758 }
1759
1760 return 0;
1761}
129eec55
VK
1762
1763/**
8f8d37b2 1764 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
129eec55
VK
1765 * @dev: device pointer used to lookup device OPPs.
1766 *
27465902 1767 * Register the initial OPP table with the OPP library for given device.
984f16c8
NM
1768 *
1769 * Locking: The internal device_opp and opp structures are RCU protected.
1770 * Hence this function indirectly uses RCU updater strategy with mutex locks
1771 * to keep the integrity of the internal data structures. Callers should ensure
1772 * that this function is *NOT* called under RCU protection or in contexts where
1773 * mutex cannot be locked.
27465902
VK
1774 *
1775 * Return:
1776 * 0 On success OR
1777 * Duplicate OPPs (both freq and volt are same) and opp->available
1778 * -EEXIST Freq are same and volt are different OR
1779 * Duplicate OPPs (both freq and volt are same) and !opp->available
1780 * -ENOMEM Memory allocation failure
1781 * -ENODEV when 'operating-points' property is not found or is invalid data
1782 * in device node.
1783 * -ENODATA when empty 'operating-points' property is found
1784 * -EINVAL when invalid entries are found in opp-v2 table
129eec55 1785 */
8f8d37b2 1786int dev_pm_opp_of_add_table(struct device *dev)
129eec55 1787{
1840995c
VK
1788 struct device_node *opp_np;
1789 int ret;
27465902
VK
1790
1791 /*
1792 * OPPs have two version of bindings now. The older one is deprecated,
1793 * try for the new binding first.
1794 */
1840995c
VK
1795 opp_np = _of_get_opp_desc_node(dev);
1796 if (!opp_np) {
27465902
VK
1797 /*
1798 * Try old-deprecated bindings for backward compatibility with
1799 * older dtbs.
1800 */
f0489a5e 1801 return _of_add_opp_table_v1(dev);
8d4d4e98
VK
1802 }
1803
f0489a5e 1804 ret = _of_add_opp_table_v2(dev, opp_np);
1840995c 1805 of_node_put(opp_np);
8d4d4e98 1806
8d4d4e98
VK
1807 return ret;
1808}
8f8d37b2 1809EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
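
/*
 * Illustrative probe-time usage (assumption, not taken from this file): a
 * driver whose device node carries an "operating-points" or
 * "operating-points-v2" property typically just calls:
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_err(dev, "Failed to parse OPP table: %d\n", ret);
 *
 * and undoes it on remove or on a later error with
 * dev_pm_opp_of_remove_table(dev).
 */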
b496dfbc 1810#endif