Commit | Line | Data |
---|---|---|
e1f60b29 NM |
1 | /* |
2 | * Generic OPP Interface | |
3 | * | |
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | |
5 | * Nishanth Menon | |
6 | * Romit Dasgupta | |
7 | * Kevin Hilman | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | */ | |
13 | ||
d6d2a528 VK |
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
15 | ||
d54974c2 | 16 | #include <linux/clk.h> |
e1f60b29 NM |
17 | #include <linux/errno.h> |
18 | #include <linux/err.h> | |
e1f60b29 | 19 | #include <linux/slab.h> |
51990e82 | 20 | #include <linux/device.h> |
80126ce7 | 21 | #include <linux/export.h> |
9f8ea969 | 22 | #include <linux/regulator/consumer.h> |
e1f60b29 | 23 | |
f59d3ee8 | 24 | #include "opp.h" |
/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 *
 * NOTE: readers traverse this list under RCU (see the
 * list_for_each_entry_rcu() users below); writers serialize on
 * opp_table_lock.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
e1f60b29 | 34 | |
/*
 * Warn (via lockdep) if the caller holds neither the RCU read lock nor
 * opp_table_lock: those are the two legitimate ways to traverse the
 * opp_tables list (readers and writers respectively).
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&opp_table_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "opp_table_lock protection");			\
} while (0)
42 | ||
2c2709dc VK |
43 | static struct opp_device *_find_opp_dev(const struct device *dev, |
44 | struct opp_table *opp_table) | |
06441658 | 45 | { |
2c2709dc | 46 | struct opp_device *opp_dev; |
06441658 | 47 | |
2c2709dc VK |
48 | list_for_each_entry(opp_dev, &opp_table->dev_list, node) |
49 | if (opp_dev->dev == dev) | |
50 | return opp_dev; | |
06441658 VK |
51 | |
52 | return NULL; | |
53 | } | |
54 | ||
e1f60b29 | 55 | /** |
2c2709dc VK |
56 | * _find_opp_table() - find opp_table struct using device pointer |
57 | * @dev: device pointer used to lookup OPP table | |
e1f60b29 | 58 | * |
2c2709dc VK |
59 | * Search OPP table for one containing matching device. Does a RCU reader |
60 | * operation to grab the pointer needed. | |
e1f60b29 | 61 | * |
2c2709dc | 62 | * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or |
e1f60b29 NM |
63 | * -EINVAL based on type of error. |
64 | * | |
0597e818 | 65 | * Locking: For readers, this function must be called under rcu_read_lock(). |
2c2709dc | 66 | * opp_table is a RCU protected pointer, which means that opp_table is valid |
0597e818 VK |
67 | * as long as we are under RCU lock. |
68 | * | |
2c2709dc | 69 | * For Writers, this function must be called with opp_table_lock held. |
e1f60b29 | 70 | */ |
2c2709dc | 71 | struct opp_table *_find_opp_table(struct device *dev) |
e1f60b29 | 72 | { |
2c2709dc | 73 | struct opp_table *opp_table; |
e1f60b29 | 74 | |
0597e818 VK |
75 | opp_rcu_lockdep_assert(); |
76 | ||
50a3cb04 | 77 | if (IS_ERR_OR_NULL(dev)) { |
e1f60b29 NM |
78 | pr_err("%s: Invalid parameters\n", __func__); |
79 | return ERR_PTR(-EINVAL); | |
80 | } | |
81 | ||
2c2709dc VK |
82 | list_for_each_entry_rcu(opp_table, &opp_tables, node) |
83 | if (_find_opp_dev(dev, opp_table)) | |
84 | return opp_table; | |
e1f60b29 | 85 | |
06441658 | 86 | return ERR_PTR(-ENODEV); |
e1f60b29 NM |
87 | } |
88 | ||
89 | /** | |
d6d00742 | 90 | * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp |
e1f60b29 NM |
91 | * @opp: opp for which voltage has to be returned for |
92 | * | |
984f16c8 | 93 | * Return: voltage in micro volt corresponding to the opp, else |
e1f60b29 NM |
94 | * return 0 |
95 | * | |
96 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
97 | * protected pointer. This means that opp which could have been fetched by | |
98 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | |
99 | * under RCU lock. The pointer returned by the opp_find_freq family must be | |
100 | * used in the same section as the usage of this function with the pointer | |
101 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | |
102 | * pointer. | |
103 | */ | |
47d43ba7 | 104 | unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) |
e1f60b29 | 105 | { |
47d43ba7 | 106 | struct dev_pm_opp *tmp_opp; |
e1f60b29 NM |
107 | unsigned long v = 0; |
108 | ||
04bf1c7f KK |
109 | opp_rcu_lockdep_assert(); |
110 | ||
e1f60b29 | 111 | tmp_opp = rcu_dereference(opp); |
d6d00742 | 112 | if (IS_ERR_OR_NULL(tmp_opp)) |
e1f60b29 NM |
113 | pr_err("%s: Invalid parameters\n", __func__); |
114 | else | |
115 | v = tmp_opp->u_volt; | |
116 | ||
117 | return v; | |
118 | } | |
5d4879cd | 119 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); |
e1f60b29 NM |
120 | |
121 | /** | |
5d4879cd | 122 | * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp |
e1f60b29 NM |
123 | * @opp: opp for which frequency has to be returned for |
124 | * | |
984f16c8 | 125 | * Return: frequency in hertz corresponding to the opp, else |
e1f60b29 NM |
126 | * return 0 |
127 | * | |
128 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
129 | * protected pointer. This means that opp which could have been fetched by | |
130 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | |
131 | * under RCU lock. The pointer returned by the opp_find_freq family must be | |
132 | * used in the same section as the usage of this function with the pointer | |
133 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | |
134 | * pointer. | |
135 | */ | |
47d43ba7 | 136 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) |
e1f60b29 | 137 | { |
47d43ba7 | 138 | struct dev_pm_opp *tmp_opp; |
e1f60b29 NM |
139 | unsigned long f = 0; |
140 | ||
04bf1c7f KK |
141 | opp_rcu_lockdep_assert(); |
142 | ||
e1f60b29 | 143 | tmp_opp = rcu_dereference(opp); |
50a3cb04 | 144 | if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) |
e1f60b29 NM |
145 | pr_err("%s: Invalid parameters\n", __func__); |
146 | else | |
147 | f = tmp_opp->rate; | |
148 | ||
149 | return f; | |
150 | } | |
5d4879cd | 151 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); |
e1f60b29 | 152 | |
19445b25 BZ |
153 | /** |
154 | * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not | |
155 | * @opp: opp for which turbo mode is being verified | |
156 | * | |
157 | * Turbo OPPs are not for normal use, and can be enabled (under certain | |
158 | * conditions) for short duration of times to finish high throughput work | |
159 | * quickly. Running on them for longer times may overheat the chip. | |
160 | * | |
161 | * Return: true if opp is turbo opp, else false. | |
162 | * | |
163 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
164 | * protected pointer. This means that opp which could have been fetched by | |
165 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | |
166 | * under RCU lock. The pointer returned by the opp_find_freq family must be | |
167 | * used in the same section as the usage of this function with the pointer | |
168 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | |
169 | * pointer. | |
170 | */ | |
171 | bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) | |
172 | { | |
173 | struct dev_pm_opp *tmp_opp; | |
174 | ||
175 | opp_rcu_lockdep_assert(); | |
176 | ||
177 | tmp_opp = rcu_dereference(opp); | |
178 | if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { | |
179 | pr_err("%s: Invalid parameters\n", __func__); | |
180 | return false; | |
181 | } | |
182 | ||
183 | return tmp_opp->turbo; | |
184 | } | |
185 | EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); | |
186 | ||
3ca9bb33 VK |
187 | /** |
188 | * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds | |
189 | * @dev: device for which we do this operation | |
190 | * | |
191 | * Return: This function returns the max clock latency in nanoseconds. | |
192 | * | |
193 | * Locking: This function takes rcu_read_lock(). | |
194 | */ | |
195 | unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) | |
196 | { | |
2c2709dc | 197 | struct opp_table *opp_table; |
3ca9bb33 VK |
198 | unsigned long clock_latency_ns; |
199 | ||
200 | rcu_read_lock(); | |
201 | ||
2c2709dc VK |
202 | opp_table = _find_opp_table(dev); |
203 | if (IS_ERR(opp_table)) | |
3ca9bb33 VK |
204 | clock_latency_ns = 0; |
205 | else | |
2c2709dc | 206 | clock_latency_ns = opp_table->clock_latency_ns_max; |
3ca9bb33 VK |
207 | |
208 | rcu_read_unlock(); | |
209 | return clock_latency_ns; | |
210 | } | |
211 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); | |
212 | ||
/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Computes the widest voltage span (min of all u_volt_min to max of all
 * u_volt_max) across the available OPPs and asks the regulator how long such
 * a transition would take.
 *
 * Return: This function returns the max voltage latency in nanoseconds,
 * or 0 when there is no OPP table or no regulator for @dev.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	unsigned long min_uV = ~0, max_uV = 0;
	int ret;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		rcu_read_unlock();
		return 0;
	}

	reg = opp_table->regulator;
	if (IS_ERR(reg)) {
		/* Regulator may not be required for device */
		rcu_read_unlock();
		return 0;
	}

	/* Track the overall [min_uV, max_uV] envelope of available OPPs. */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (!opp->available)
			continue;

		if (opp->u_volt_min < min_uV)
			min_uV = opp->u_volt_min;
		if (opp->u_volt_max > max_uV)
			max_uV = opp->u_volt_max;
	}

	rcu_read_unlock();

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	ret = regulator_set_voltage_time(reg, min_uV, max_uV);
	/* regulator_set_voltage_time() returns microseconds; convert to ns. */
	if (ret > 0)
		latency_ns = ret * 1000;

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
268 | ||
/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	unsigned long volt_latency = dev_pm_opp_get_max_volt_latency(dev);
	unsigned long clock_latency = dev_pm_opp_get_max_clock_latency(dev);

	/* A transition may need both a voltage and a clock change. */
	return volt_latency + clock_latency;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
285 | ||
4eafbd15 BZ |
286 | /** |
287 | * dev_pm_opp_get_suspend_opp() - Get suspend opp | |
288 | * @dev: device for which we do this operation | |
289 | * | |
290 | * Return: This function returns pointer to the suspend opp if it is | |
1b2b90cb | 291 | * defined and available, otherwise it returns NULL. |
4eafbd15 BZ |
292 | * |
293 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
294 | * protected pointer. The reason for the same is that the opp pointer which is | |
295 | * returned will remain valid for use with opp_get_{voltage, freq} only while | |
296 | * under the locked area. The pointer returned must be used prior to unlocking | |
297 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
298 | */ | |
299 | struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) | |
300 | { | |
2c2709dc | 301 | struct opp_table *opp_table; |
4eafbd15 BZ |
302 | |
303 | opp_rcu_lockdep_assert(); | |
304 | ||
2c2709dc VK |
305 | opp_table = _find_opp_table(dev); |
306 | if (IS_ERR(opp_table) || !opp_table->suspend_opp || | |
307 | !opp_table->suspend_opp->available) | |
1b2b90cb | 308 | return NULL; |
4eafbd15 | 309 | |
2c2709dc | 310 | return opp_table->suspend_opp; |
4eafbd15 BZ |
311 | } |
312 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); | |
313 | ||
e1f60b29 | 314 | /** |
2c2709dc | 315 | * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table |
e1f60b29 NM |
316 | * @dev: device for which we do this operation |
317 | * | |
984f16c8 | 318 | * Return: This function returns the number of available opps if there are any, |
e1f60b29 NM |
319 | * else returns 0 if none or the corresponding error value. |
320 | * | |
b4718c02 | 321 | * Locking: This function takes rcu_read_lock(). |
e1f60b29 | 322 | */ |
5d4879cd | 323 | int dev_pm_opp_get_opp_count(struct device *dev) |
e1f60b29 | 324 | { |
2c2709dc | 325 | struct opp_table *opp_table; |
47d43ba7 | 326 | struct dev_pm_opp *temp_opp; |
e1f60b29 NM |
327 | int count = 0; |
328 | ||
b4718c02 | 329 | rcu_read_lock(); |
b02ded24 | 330 | |
2c2709dc VK |
331 | opp_table = _find_opp_table(dev); |
332 | if (IS_ERR(opp_table)) { | |
333 | count = PTR_ERR(opp_table); | |
334 | dev_err(dev, "%s: OPP table not found (%d)\n", | |
b4718c02 DT |
335 | __func__, count); |
336 | goto out_unlock; | |
e1f60b29 NM |
337 | } |
338 | ||
2c2709dc | 339 | list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { |
e1f60b29 NM |
340 | if (temp_opp->available) |
341 | count++; | |
342 | } | |
343 | ||
b4718c02 DT |
344 | out_unlock: |
345 | rcu_read_unlock(); | |
e1f60b29 NM |
346 | return count; |
347 | } | |
5d4879cd | 348 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); |
e1f60b29 NM |
349 | |
350 | /** | |
5d4879cd | 351 | * dev_pm_opp_find_freq_exact() - search for an exact frequency |
e1f60b29 NM |
352 | * @dev: device for which we do this operation |
353 | * @freq: frequency to search for | |
7ae49618 | 354 | * @available: true/false - match for available opp |
e1f60b29 | 355 | * |
2c2709dc | 356 | * Return: Searches for exact match in the opp table and returns pointer to the |
984f16c8 NM |
357 | * matching opp if found, else returns ERR_PTR in case of error and should |
358 | * be handled using IS_ERR. Error return values can be: | |
0779726c NM |
359 | * EINVAL: for bad pointer |
360 | * ERANGE: no match found for search | |
361 | * ENODEV: if device not found in list of registered devices | |
e1f60b29 NM |
362 | * |
363 | * Note: available is a modifier for the search. if available=true, then the | |
364 | * match is for exact matching frequency and is available in the stored OPP | |
365 | * table. if false, the match is for exact frequency which is not available. | |
366 | * | |
367 | * This provides a mechanism to enable an opp which is not available currently | |
368 | * or the opposite as well. | |
369 | * | |
370 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
371 | * protected pointer. The reason for the same is that the opp pointer which is | |
372 | * returned will remain valid for use with opp_get_{voltage, freq} only while | |
373 | * under the locked area. The pointer returned must be used prior to unlocking | |
374 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
375 | */ | |
47d43ba7 NM |
376 | struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, |
377 | unsigned long freq, | |
378 | bool available) | |
e1f60b29 | 379 | { |
2c2709dc | 380 | struct opp_table *opp_table; |
47d43ba7 | 381 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
e1f60b29 | 382 | |
b02ded24 DT |
383 | opp_rcu_lockdep_assert(); |
384 | ||
2c2709dc VK |
385 | opp_table = _find_opp_table(dev); |
386 | if (IS_ERR(opp_table)) { | |
387 | int r = PTR_ERR(opp_table); | |
388 | ||
389 | dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); | |
e1f60b29 NM |
390 | return ERR_PTR(r); |
391 | } | |
392 | ||
2c2709dc | 393 | list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { |
e1f60b29 NM |
394 | if (temp_opp->available == available && |
395 | temp_opp->rate == freq) { | |
396 | opp = temp_opp; | |
397 | break; | |
398 | } | |
399 | } | |
400 | ||
401 | return opp; | |
402 | } | |
5d4879cd | 403 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); |
e1f60b29 | 404 | |
067b7ce0 JZ |
405 | static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, |
406 | unsigned long *freq) | |
407 | { | |
408 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); | |
409 | ||
410 | list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { | |
411 | if (temp_opp->available && temp_opp->rate >= *freq) { | |
412 | opp = temp_opp; | |
413 | *freq = opp->rate; | |
414 | break; | |
415 | } | |
416 | } | |
417 | ||
418 | return opp; | |
419 | } | |
420 | ||
e1f60b29 | 421 | /** |
5d4879cd | 422 | * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq |
e1f60b29 NM |
423 | * @dev: device for which we do this operation |
424 | * @freq: Start frequency | |
425 | * | |
426 | * Search for the matching ceil *available* OPP from a starting freq | |
427 | * for a device. | |
428 | * | |
984f16c8 | 429 | * Return: matching *opp and refreshes *freq accordingly, else returns |
0779726c NM |
430 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
431 | * values can be: | |
432 | * EINVAL: for bad pointer | |
433 | * ERANGE: no match found for search | |
434 | * ENODEV: if device not found in list of registered devices | |
e1f60b29 NM |
435 | * |
436 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
437 | * protected pointer. The reason for the same is that the opp pointer which is | |
438 | * returned will remain valid for use with opp_get_{voltage, freq} only while | |
439 | * under the locked area. The pointer returned must be used prior to unlocking | |
440 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
441 | */ | |
47d43ba7 NM |
442 | struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, |
443 | unsigned long *freq) | |
e1f60b29 | 444 | { |
2c2709dc | 445 | struct opp_table *opp_table; |
e1f60b29 | 446 | |
b02ded24 DT |
447 | opp_rcu_lockdep_assert(); |
448 | ||
e1f60b29 NM |
449 | if (!dev || !freq) { |
450 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | |
451 | return ERR_PTR(-EINVAL); | |
452 | } | |
453 | ||
2c2709dc VK |
454 | opp_table = _find_opp_table(dev); |
455 | if (IS_ERR(opp_table)) | |
456 | return ERR_CAST(opp_table); | |
e1f60b29 | 457 | |
067b7ce0 | 458 | return _find_freq_ceil(opp_table, freq); |
e1f60b29 | 459 | } |
5d4879cd | 460 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); |
e1f60b29 NM |
461 | |
462 | /** | |
5d4879cd | 463 | * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq |
e1f60b29 NM |
464 | * @dev: device for which we do this operation |
465 | * @freq: Start frequency | |
466 | * | |
467 | * Search for the matching floor *available* OPP from a starting freq | |
468 | * for a device. | |
469 | * | |
984f16c8 | 470 | * Return: matching *opp and refreshes *freq accordingly, else returns |
0779726c NM |
471 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
472 | * values can be: | |
473 | * EINVAL: for bad pointer | |
474 | * ERANGE: no match found for search | |
475 | * ENODEV: if device not found in list of registered devices | |
e1f60b29 NM |
476 | * |
477 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
478 | * protected pointer. The reason for the same is that the opp pointer which is | |
479 | * returned will remain valid for use with opp_get_{voltage, freq} only while | |
480 | * under the locked area. The pointer returned must be used prior to unlocking | |
481 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
482 | */ | |
47d43ba7 NM |
483 | struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, |
484 | unsigned long *freq) | |
e1f60b29 | 485 | { |
2c2709dc | 486 | struct opp_table *opp_table; |
47d43ba7 | 487 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
e1f60b29 | 488 | |
b02ded24 DT |
489 | opp_rcu_lockdep_assert(); |
490 | ||
e1f60b29 NM |
491 | if (!dev || !freq) { |
492 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | |
493 | return ERR_PTR(-EINVAL); | |
494 | } | |
495 | ||
2c2709dc VK |
496 | opp_table = _find_opp_table(dev); |
497 | if (IS_ERR(opp_table)) | |
498 | return ERR_CAST(opp_table); | |
e1f60b29 | 499 | |
2c2709dc | 500 | list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { |
e1f60b29 NM |
501 | if (temp_opp->available) { |
502 | /* go to the next node, before choosing prev */ | |
503 | if (temp_opp->rate > *freq) | |
504 | break; | |
505 | else | |
506 | opp = temp_opp; | |
507 | } | |
508 | } | |
509 | if (!IS_ERR(opp)) | |
510 | *freq = opp->rate; | |
511 | ||
512 | return opp; | |
513 | } | |
5d4879cd | 514 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); |
e1f60b29 | 515 | |
6a0712f6 | 516 | /* |
2c2709dc | 517 | * The caller needs to ensure that opp_table (and hence the clk) isn't freed, |
6a0712f6 VK |
518 | * while clk returned here is used. |
519 | */ | |
520 | static struct clk *_get_opp_clk(struct device *dev) | |
521 | { | |
2c2709dc | 522 | struct opp_table *opp_table; |
6a0712f6 VK |
523 | struct clk *clk; |
524 | ||
525 | rcu_read_lock(); | |
526 | ||
2c2709dc VK |
527 | opp_table = _find_opp_table(dev); |
528 | if (IS_ERR(opp_table)) { | |
6a0712f6 | 529 | dev_err(dev, "%s: device opp doesn't exist\n", __func__); |
2c2709dc | 530 | clk = ERR_CAST(opp_table); |
6a0712f6 VK |
531 | goto unlock; |
532 | } | |
533 | ||
2c2709dc | 534 | clk = opp_table->clk; |
6a0712f6 VK |
535 | if (IS_ERR(clk)) |
536 | dev_err(dev, "%s: No clock available for the device\n", | |
537 | __func__); | |
538 | ||
539 | unlock: | |
540 | rcu_read_unlock(); | |
541 | return clk; | |
542 | } | |
543 | ||
/*
 * Program the regulator to the [u_volt_min, u_volt, u_volt_max] triplet.
 * A missing regulator (ERR_PTR) is not an error: voltage scaling is simply
 * skipped for such devices.  Returns 0 on success or a negative errno.
 */
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    unsigned long u_volt, unsigned long u_volt_min,
			    unsigned long u_volt_max)
{
	int err;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
		u_volt, u_volt_max);

	err = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
					    u_volt_max);
	if (err)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
			__func__, u_volt_min, u_volt, u_volt_max, err);

	return err;
}
568 | ||
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Voltage is raised before the clock when scaling up and lowered after the
 * clock when scaling down; on failure the previous voltage/frequency are
 * restored on a best-effort basis.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *old_opp, *opp;
	struct regulator *reg;
	struct clk *clk;
	unsigned long freq, old_freq;
	unsigned long u_volt, u_volt_min, u_volt_max;
	unsigned long ou_volt, ou_volt_min, ou_volt_max;
	int ret;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Let the clk framework round; fall back to the raw request. */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	/*
	 * Capture the current OPP's voltages so they can be restored if the
	 * transition fails.  A lookup failure here is not fatal: we just
	 * won't be able to restore the voltage later.
	 */
	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (!IS_ERR(old_opp)) {
		ou_volt = old_opp->u_volt;
		ou_volt_min = old_opp->u_volt_min;
		ou_volt_max = old_opp->u_volt_max;
	} else {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	/* Copy everything needed out of the RCU-protected OPP entries. */
	u_volt = opp->u_volt;
	u_volt_min = opp->u_volt_min;
	u_volt_max = opp->u_volt_max;

	reg = opp_table->regulator;

	rcu_read_unlock();

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
		__func__, old_freq, freq);

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
		goto restore_voltage;
	}

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (clk_set_rate(clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (!IS_ERR(old_opp))
		_set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
691 | ||
/* OPP-dev Helpers */
/*
 * SRCU callback that frees an opp_device once the SRCU grace period has
 * elapsed (queued via call_srcu() in _remove_opp_dev()).
 *
 * NOTE(review): this hands the object to kfree_rcu() from inside an SRCU
 * callback, deferring the free through a second (regular RCU) grace period
 * rather than calling kfree() directly — presumably intentional; confirm.
 */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
	struct opp_device *opp_dev;

	opp_dev = container_of(head, struct opp_device, rcu_head);
	kfree_rcu(opp_dev, rcu_head);
}
700 | ||
/*
 * Unlink @opp_dev from @opp_table: tear down its debugfs entries, remove it
 * from the table's device list, and defer the actual free until after the
 * SRCU grace period so concurrent readers stay safe.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}
709 | ||
2c2709dc VK |
710 | struct opp_device *_add_opp_dev(const struct device *dev, |
711 | struct opp_table *opp_table) | |
06441658 | 712 | { |
2c2709dc | 713 | struct opp_device *opp_dev; |
deaa5146 | 714 | int ret; |
06441658 | 715 | |
2c2709dc VK |
716 | opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL); |
717 | if (!opp_dev) | |
06441658 VK |
718 | return NULL; |
719 | ||
2c2709dc VK |
720 | /* Initialize opp-dev */ |
721 | opp_dev->dev = dev; | |
722 | list_add_rcu(&opp_dev->node, &opp_table->dev_list); | |
06441658 | 723 | |
2c2709dc VK |
724 | /* Create debugfs entries for the opp_table */ |
725 | ret = opp_debug_register(opp_dev, opp_table); | |
deaa5146 VK |
726 | if (ret) |
727 | dev_err(dev, "%s: Failed to register opp debugfs (%d)\n", | |
728 | __func__, ret); | |
729 | ||
2c2709dc | 730 | return opp_dev; |
06441658 VK |
731 | } |
732 | ||
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev: device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * The new table is fully initialized (device list, DT data, regulator
 * placeholder, clk, notifier head, opp list) before it is published on the
 * global opp_tables list via list_add_rcu(), so RCU readers never observe a
 * half-built table.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);

	/* Set regulator to a non-NULL error value */
	opp_table->regulator = ERR_PTR(-ENXIO);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		/* Stay quiet on probe-defer; the caller will retry later. */
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/* Secure the device table modification */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}
790 | ||
/**
 * _kfree_device_rcu() - Free opp_table RCU handler
 * @head: RCU head
 *
 * Queued via call_srcu() in _remove_opp_table().
 *
 * NOTE(review): frees via kfree_rcu() from inside the SRCU callback, adding
 * a second (regular RCU) grace period before the memory is released —
 * presumably intentional; confirm.
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct opp_table *opp_table = container_of(head, struct opp_table,
						   rcu_head);

	kfree_rcu(opp_table, rcu_head);
}
38393409 VK |
802 | |
/**
 * _remove_opp_table() - Removes a OPP table
 * @opp_table: OPP table to be removed.
 *
 * Removes/frees OPP table if it doesn't contain any OPPs and no external
 * resource (supported-hw list, prop-name, regulator) still pins it.
 * Caller is expected to hold opp_table_lock.
 */
static void _remove_opp_table(struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	/* Keep the table alive while it still holds OPPs ... */
	if (!list_empty(&opp_table->opp_list))
		return;

	/* ... or while any blocking resource is still registered. */
	if (opp_table->supported_hw)
		return;

	if (opp_table->prop_name)
		return;

	/* regulator is an ERR_PTR sentinel when not configured */
	if (!IS_ERR(opp_table->regulator))
		return;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	/* Unlink first; the free is deferred past an SRCU grace period */
	list_del_rcu(&opp_table->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
		  _kfree_device_rcu);
}
e1f60b29 | 841 | |
984f16c8 NM |
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head: RCU head embedded in the OPP being freed
 *
 * Invoked as the call_srcu() callback queued by _opp_remove() and
 * _opp_set_availability(); kfree_rcu() defers the actual free for one
 * more plain RCU grace period.
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}
852 | ||
984f16c8 NM |
/**
 * _opp_remove() - Remove an OPP from a table definition
 * @opp_table: points back to the opp_table struct this opp belongs to
 * @opp: pointer to the OPP to remove
 * @notify: OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
		 bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	/* Unlink now; the memory is freed after an SRCU grace period */
	list_del_rcu(&opp->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Drop the table itself if this was its last OPP/resource */
	_remove_opp_table(opp_table);
}
881 | ||
882 | /** | |
2c2709dc | 883 | * dev_pm_opp_remove() - Remove an OPP from OPP table |
129eec55 VK |
884 | * @dev: device for which we do this operation |
885 | * @freq: OPP to remove with matching 'freq' | |
886 | * | |
2c2709dc | 887 | * This function removes an opp from the opp table. |
984f16c8 | 888 | * |
2c2709dc | 889 | * Locking: The internal opp_table and opp structures are RCU protected. |
984f16c8 NM |
890 | * Hence this function internally uses RCU updater strategy with mutex locks |
891 | * to keep the integrity of the internal data structures. Callers should ensure | |
892 | * that this function is *NOT* called under RCU protection or in contexts where | |
893 | * mutex cannot be locked. | |
129eec55 VK |
894 | */ |
895 | void dev_pm_opp_remove(struct device *dev, unsigned long freq) | |
896 | { | |
897 | struct dev_pm_opp *opp; | |
2c2709dc | 898 | struct opp_table *opp_table; |
129eec55 VK |
899 | bool found = false; |
900 | ||
2c2709dc VK |
901 | /* Hold our table modification lock here */ |
902 | mutex_lock(&opp_table_lock); | |
129eec55 | 903 | |
2c2709dc VK |
904 | opp_table = _find_opp_table(dev); |
905 | if (IS_ERR(opp_table)) | |
129eec55 VK |
906 | goto unlock; |
907 | ||
2c2709dc | 908 | list_for_each_entry(opp, &opp_table->opp_list, node) { |
129eec55 VK |
909 | if (opp->rate == freq) { |
910 | found = true; | |
911 | break; | |
912 | } | |
913 | } | |
914 | ||
915 | if (!found) { | |
916 | dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", | |
917 | __func__, freq); | |
918 | goto unlock; | |
919 | } | |
920 | ||
2c2709dc | 921 | _opp_remove(opp_table, opp, true); |
129eec55 | 922 | unlock: |
2c2709dc | 923 | mutex_unlock(&opp_table_lock); |
129eec55 VK |
924 | } |
925 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); | |
926 | ||
f47b72a1 VK |
927 | struct dev_pm_opp *_allocate_opp(struct device *dev, |
928 | struct opp_table **opp_table) | |
e1f60b29 | 929 | { |
23dacf6d | 930 | struct dev_pm_opp *opp; |
e1f60b29 | 931 | |
23dacf6d VK |
932 | /* allocate new OPP node */ |
933 | opp = kzalloc(sizeof(*opp), GFP_KERNEL); | |
934 | if (!opp) | |
935 | return NULL; | |
e1f60b29 | 936 | |
23dacf6d | 937 | INIT_LIST_HEAD(&opp->node); |
e1f60b29 | 938 | |
2c2709dc VK |
939 | *opp_table = _add_opp_table(dev); |
940 | if (!*opp_table) { | |
23dacf6d VK |
941 | kfree(opp); |
942 | return NULL; | |
943 | } | |
944 | ||
945 | return opp; | |
946 | } | |
947 | ||
7d34d56e | 948 | static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, |
2c2709dc | 949 | struct opp_table *opp_table) |
7d34d56e | 950 | { |
2c2709dc | 951 | struct regulator *reg = opp_table->regulator; |
7d34d56e | 952 | |
0c717d0f | 953 | if (!IS_ERR(reg) && |
7d34d56e VK |
954 | !regulator_is_supported_voltage(reg, opp->u_volt_min, |
955 | opp->u_volt_max)) { | |
956 | pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n", | |
957 | __func__, opp->u_volt_min, opp->u_volt_max); | |
958 | return false; | |
959 | } | |
960 | ||
961 | return true; | |
962 | } | |
963 | ||
f47b72a1 VK |
/**
 * _opp_add() - Insert a populated OPP into a device's OPP table
 * @dev: device the OPP belongs to (used for diagnostics only)
 * @new_opp: fully initialized OPP to insert
 * @opp_table: table to insert into; caller holds the updater-side lock
 *
 * Return: 0 on success (also for a benign duplicate: same freq, same volt,
 * existing OPP available), -EEXIST for a conflicting duplicate frequency.
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* remember the last node we should insert after */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	list_add_rcu(&new_opp->node, head);

	/* debugfs registration failure is logged but deliberately non-fatal */
	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	/* Disable (but keep) OPPs the regulator cannot actually supply */
	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
1013 | ||
/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
		bool dynamic)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	/* voltage window from the v1 percentage tolerance */
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->u_volt = u_volt;
	new_opp->u_volt_min = u_volt - tol;
	new_opp->u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	/* Drop the lock before notifying; the new OPP is already published */
	mutex_unlock(&opp_table_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}
38393409 | 1086 | |
7de36b0a VK |
1087 | /** |
1088 | * dev_pm_opp_set_supported_hw() - Set supported platforms | |
1089 | * @dev: Device for which supported-hw has to be set. | |
1090 | * @versions: Array of hierarchy of versions to match. | |
1091 | * @count: Number of elements in the array. | |
1092 | * | |
1093 | * This is required only for the V2 bindings, and it enables a platform to | |
1094 | * specify the hierarchy of versions it supports. OPP layer will then enable | |
1095 | * OPPs, which are available for those versions, based on its 'opp-supported-hw' | |
1096 | * property. | |
1097 | * | |
2c2709dc | 1098 | * Locking: The internal opp_table and opp structures are RCU protected. |
7de36b0a VK |
1099 | * Hence this function internally uses RCU updater strategy with mutex locks |
1100 | * to keep the integrity of the internal data structures. Callers should ensure | |
1101 | * that this function is *NOT* called under RCU protection or in contexts where | |
1102 | * mutex cannot be locked. | |
1103 | */ | |
1104 | int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, | |
1105 | unsigned int count) | |
1106 | { | |
2c2709dc | 1107 | struct opp_table *opp_table; |
7de36b0a VK |
1108 | int ret = 0; |
1109 | ||
2c2709dc VK |
1110 | /* Hold our table modification lock here */ |
1111 | mutex_lock(&opp_table_lock); | |
7de36b0a | 1112 | |
2c2709dc VK |
1113 | opp_table = _add_opp_table(dev); |
1114 | if (!opp_table) { | |
7de36b0a VK |
1115 | ret = -ENOMEM; |
1116 | goto unlock; | |
1117 | } | |
1118 | ||
2c2709dc VK |
1119 | /* Make sure there are no concurrent readers while updating opp_table */ |
1120 | WARN_ON(!list_empty(&opp_table->opp_list)); | |
7de36b0a | 1121 | |
2c2709dc VK |
1122 | /* Do we already have a version hierarchy associated with opp_table? */ |
1123 | if (opp_table->supported_hw) { | |
7de36b0a VK |
1124 | dev_err(dev, "%s: Already have supported hardware list\n", |
1125 | __func__); | |
1126 | ret = -EBUSY; | |
1127 | goto err; | |
1128 | } | |
1129 | ||
2c2709dc | 1130 | opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions), |
7de36b0a | 1131 | GFP_KERNEL); |
2c2709dc | 1132 | if (!opp_table->supported_hw) { |
7de36b0a VK |
1133 | ret = -ENOMEM; |
1134 | goto err; | |
1135 | } | |
1136 | ||
2c2709dc VK |
1137 | opp_table->supported_hw_count = count; |
1138 | mutex_unlock(&opp_table_lock); | |
7de36b0a VK |
1139 | return 0; |
1140 | ||
1141 | err: | |
2c2709dc | 1142 | _remove_opp_table(opp_table); |
7de36b0a | 1143 | unlock: |
2c2709dc | 1144 | mutex_unlock(&opp_table_lock); |
7de36b0a VK |
1145 | |
1146 | return ret; | |
1147 | } | |
1148 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw); | |
1149 | ||
1150 | /** | |
1151 | * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw | |
a5da6447 | 1152 | * @dev: Device for which supported-hw has to be put. |
7de36b0a VK |
1153 | * |
1154 | * This is required only for the V2 bindings, and is called for a matching | |
2c2709dc | 1155 | * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure |
7de36b0a VK |
1156 | * will not be freed. |
1157 | * | |
2c2709dc | 1158 | * Locking: The internal opp_table and opp structures are RCU protected. |
7de36b0a VK |
1159 | * Hence this function internally uses RCU updater strategy with mutex locks |
1160 | * to keep the integrity of the internal data structures. Callers should ensure | |
1161 | * that this function is *NOT* called under RCU protection or in contexts where | |
1162 | * mutex cannot be locked. | |
1163 | */ | |
1164 | void dev_pm_opp_put_supported_hw(struct device *dev) | |
1165 | { | |
2c2709dc | 1166 | struct opp_table *opp_table; |
7de36b0a | 1167 | |
2c2709dc VK |
1168 | /* Hold our table modification lock here */ |
1169 | mutex_lock(&opp_table_lock); | |
7de36b0a | 1170 | |
2c2709dc VK |
1171 | /* Check for existing table for 'dev' first */ |
1172 | opp_table = _find_opp_table(dev); | |
1173 | if (IS_ERR(opp_table)) { | |
1174 | dev_err(dev, "Failed to find opp_table: %ld\n", | |
1175 | PTR_ERR(opp_table)); | |
7de36b0a VK |
1176 | goto unlock; |
1177 | } | |
1178 | ||
2c2709dc VK |
1179 | /* Make sure there are no concurrent readers while updating opp_table */ |
1180 | WARN_ON(!list_empty(&opp_table->opp_list)); | |
7de36b0a | 1181 | |
2c2709dc | 1182 | if (!opp_table->supported_hw) { |
7de36b0a VK |
1183 | dev_err(dev, "%s: Doesn't have supported hardware list\n", |
1184 | __func__); | |
1185 | goto unlock; | |
1186 | } | |
1187 | ||
2c2709dc VK |
1188 | kfree(opp_table->supported_hw); |
1189 | opp_table->supported_hw = NULL; | |
1190 | opp_table->supported_hw_count = 0; | |
7de36b0a | 1191 | |
2c2709dc VK |
1192 | /* Try freeing opp_table if this was the last blocking resource */ |
1193 | _remove_opp_table(opp_table); | |
7de36b0a VK |
1194 | |
1195 | unlock: | |
2c2709dc | 1196 | mutex_unlock(&opp_table_lock); |
7de36b0a VK |
1197 | } |
1198 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw); | |
1199 | ||
01fb4d3c VK |
1200 | /** |
1201 | * dev_pm_opp_set_prop_name() - Set prop-extn name | |
a5da6447 | 1202 | * @dev: Device for which the prop-name has to be set. |
01fb4d3c VK |
1203 | * @name: name to postfix to properties. |
1204 | * | |
1205 | * This is required only for the V2 bindings, and it enables a platform to | |
1206 | * specify the extn to be used for certain property names. The properties to | |
1207 | * which the extension will apply are opp-microvolt and opp-microamp. OPP core | |
1208 | * should postfix the property name with -<name> while looking for them. | |
1209 | * | |
2c2709dc | 1210 | * Locking: The internal opp_table and opp structures are RCU protected. |
01fb4d3c VK |
1211 | * Hence this function internally uses RCU updater strategy with mutex locks |
1212 | * to keep the integrity of the internal data structures. Callers should ensure | |
1213 | * that this function is *NOT* called under RCU protection or in contexts where | |
1214 | * mutex cannot be locked. | |
1215 | */ | |
1216 | int dev_pm_opp_set_prop_name(struct device *dev, const char *name) | |
1217 | { | |
2c2709dc | 1218 | struct opp_table *opp_table; |
01fb4d3c VK |
1219 | int ret = 0; |
1220 | ||
2c2709dc VK |
1221 | /* Hold our table modification lock here */ |
1222 | mutex_lock(&opp_table_lock); | |
01fb4d3c | 1223 | |
2c2709dc VK |
1224 | opp_table = _add_opp_table(dev); |
1225 | if (!opp_table) { | |
01fb4d3c VK |
1226 | ret = -ENOMEM; |
1227 | goto unlock; | |
1228 | } | |
1229 | ||
2c2709dc VK |
1230 | /* Make sure there are no concurrent readers while updating opp_table */ |
1231 | WARN_ON(!list_empty(&opp_table->opp_list)); | |
01fb4d3c | 1232 | |
2c2709dc VK |
1233 | /* Do we already have a prop-name associated with opp_table? */ |
1234 | if (opp_table->prop_name) { | |
01fb4d3c | 1235 | dev_err(dev, "%s: Already have prop-name %s\n", __func__, |
2c2709dc | 1236 | opp_table->prop_name); |
01fb4d3c VK |
1237 | ret = -EBUSY; |
1238 | goto err; | |
1239 | } | |
1240 | ||
2c2709dc VK |
1241 | opp_table->prop_name = kstrdup(name, GFP_KERNEL); |
1242 | if (!opp_table->prop_name) { | |
01fb4d3c VK |
1243 | ret = -ENOMEM; |
1244 | goto err; | |
1245 | } | |
1246 | ||
2c2709dc | 1247 | mutex_unlock(&opp_table_lock); |
01fb4d3c VK |
1248 | return 0; |
1249 | ||
1250 | err: | |
2c2709dc | 1251 | _remove_opp_table(opp_table); |
01fb4d3c | 1252 | unlock: |
2c2709dc | 1253 | mutex_unlock(&opp_table_lock); |
01fb4d3c VK |
1254 | |
1255 | return ret; | |
1256 | } | |
1257 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); | |
1258 | ||
1259 | /** | |
1260 | * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name | |
a5da6447 | 1261 | * @dev: Device for which the prop-name has to be put. |
01fb4d3c VK |
1262 | * |
1263 | * This is required only for the V2 bindings, and is called for a matching | |
2c2709dc | 1264 | * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure |
01fb4d3c VK |
1265 | * will not be freed. |
1266 | * | |
2c2709dc | 1267 | * Locking: The internal opp_table and opp structures are RCU protected. |
01fb4d3c VK |
1268 | * Hence this function internally uses RCU updater strategy with mutex locks |
1269 | * to keep the integrity of the internal data structures. Callers should ensure | |
1270 | * that this function is *NOT* called under RCU protection or in contexts where | |
1271 | * mutex cannot be locked. | |
1272 | */ | |
1273 | void dev_pm_opp_put_prop_name(struct device *dev) | |
1274 | { | |
2c2709dc | 1275 | struct opp_table *opp_table; |
01fb4d3c | 1276 | |
2c2709dc VK |
1277 | /* Hold our table modification lock here */ |
1278 | mutex_lock(&opp_table_lock); | |
01fb4d3c | 1279 | |
2c2709dc VK |
1280 | /* Check for existing table for 'dev' first */ |
1281 | opp_table = _find_opp_table(dev); | |
1282 | if (IS_ERR(opp_table)) { | |
1283 | dev_err(dev, "Failed to find opp_table: %ld\n", | |
1284 | PTR_ERR(opp_table)); | |
01fb4d3c VK |
1285 | goto unlock; |
1286 | } | |
1287 | ||
2c2709dc VK |
1288 | /* Make sure there are no concurrent readers while updating opp_table */ |
1289 | WARN_ON(!list_empty(&opp_table->opp_list)); | |
01fb4d3c | 1290 | |
2c2709dc | 1291 | if (!opp_table->prop_name) { |
01fb4d3c VK |
1292 | dev_err(dev, "%s: Doesn't have a prop-name\n", __func__); |
1293 | goto unlock; | |
1294 | } | |
1295 | ||
2c2709dc VK |
1296 | kfree(opp_table->prop_name); |
1297 | opp_table->prop_name = NULL; | |
01fb4d3c | 1298 | |
2c2709dc VK |
1299 | /* Try freeing opp_table if this was the last blocking resource */ |
1300 | _remove_opp_table(opp_table); | |
01fb4d3c VK |
1301 | |
1302 | unlock: | |
2c2709dc | 1303 | mutex_unlock(&opp_table_lock); |
01fb4d3c VK |
1304 | } |
1305 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); | |
1306 | ||
9f8ea969 VK |
1307 | /** |
1308 | * dev_pm_opp_set_regulator() - Set regulator name for the device | |
1309 | * @dev: Device for which regulator name is being set. | |
1310 | * @name: Name of the regulator. | |
1311 | * | |
1312 | * In order to support OPP switching, OPP layer needs to know the name of the | |
1313 | * device's regulator, as the core would be required to switch voltages as well. | |
1314 | * | |
1315 | * This must be called before any OPPs are initialized for the device. | |
1316 | * | |
2c2709dc | 1317 | * Locking: The internal opp_table and opp structures are RCU protected. |
9f8ea969 VK |
1318 | * Hence this function internally uses RCU updater strategy with mutex locks |
1319 | * to keep the integrity of the internal data structures. Callers should ensure | |
1320 | * that this function is *NOT* called under RCU protection or in contexts where | |
1321 | * mutex cannot be locked. | |
1322 | */ | |
1323 | int dev_pm_opp_set_regulator(struct device *dev, const char *name) | |
1324 | { | |
2c2709dc | 1325 | struct opp_table *opp_table; |
9f8ea969 VK |
1326 | struct regulator *reg; |
1327 | int ret; | |
1328 | ||
2c2709dc | 1329 | mutex_lock(&opp_table_lock); |
9f8ea969 | 1330 | |
2c2709dc VK |
1331 | opp_table = _add_opp_table(dev); |
1332 | if (!opp_table) { | |
9f8ea969 VK |
1333 | ret = -ENOMEM; |
1334 | goto unlock; | |
1335 | } | |
1336 | ||
1337 | /* This should be called before OPPs are initialized */ | |
2c2709dc | 1338 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { |
9f8ea969 VK |
1339 | ret = -EBUSY; |
1340 | goto err; | |
1341 | } | |
1342 | ||
1343 | /* Already have a regulator set */ | |
2c2709dc | 1344 | if (WARN_ON(!IS_ERR(opp_table->regulator))) { |
9f8ea969 VK |
1345 | ret = -EBUSY; |
1346 | goto err; | |
1347 | } | |
1348 | /* Allocate the regulator */ | |
1349 | reg = regulator_get_optional(dev, name); | |
1350 | if (IS_ERR(reg)) { | |
1351 | ret = PTR_ERR(reg); | |
1352 | if (ret != -EPROBE_DEFER) | |
1353 | dev_err(dev, "%s: no regulator (%s) found: %d\n", | |
1354 | __func__, name, ret); | |
1355 | goto err; | |
1356 | } | |
1357 | ||
2c2709dc | 1358 | opp_table->regulator = reg; |
9f8ea969 | 1359 | |
2c2709dc | 1360 | mutex_unlock(&opp_table_lock); |
9f8ea969 VK |
1361 | return 0; |
1362 | ||
1363 | err: | |
2c2709dc | 1364 | _remove_opp_table(opp_table); |
9f8ea969 | 1365 | unlock: |
2c2709dc | 1366 | mutex_unlock(&opp_table_lock); |
9f8ea969 VK |
1367 | |
1368 | return ret; | |
1369 | } | |
1370 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator); | |
1371 | ||
1372 | /** | |
1373 | * dev_pm_opp_put_regulator() - Releases resources blocked for regulator | |
1374 | * @dev: Device for which regulator was set. | |
1375 | * | |
2c2709dc | 1376 | * Locking: The internal opp_table and opp structures are RCU protected. |
9f8ea969 VK |
1377 | * Hence this function internally uses RCU updater strategy with mutex locks |
1378 | * to keep the integrity of the internal data structures. Callers should ensure | |
1379 | * that this function is *NOT* called under RCU protection or in contexts where | |
1380 | * mutex cannot be locked. | |
1381 | */ | |
1382 | void dev_pm_opp_put_regulator(struct device *dev) | |
1383 | { | |
2c2709dc | 1384 | struct opp_table *opp_table; |
9f8ea969 | 1385 | |
2c2709dc | 1386 | mutex_lock(&opp_table_lock); |
9f8ea969 | 1387 | |
2c2709dc VK |
1388 | /* Check for existing table for 'dev' first */ |
1389 | opp_table = _find_opp_table(dev); | |
1390 | if (IS_ERR(opp_table)) { | |
1391 | dev_err(dev, "Failed to find opp_table: %ld\n", | |
1392 | PTR_ERR(opp_table)); | |
9f8ea969 VK |
1393 | goto unlock; |
1394 | } | |
1395 | ||
2c2709dc | 1396 | if (IS_ERR(opp_table->regulator)) { |
9f8ea969 VK |
1397 | dev_err(dev, "%s: Doesn't have regulator set\n", __func__); |
1398 | goto unlock; | |
1399 | } | |
1400 | ||
2c2709dc VK |
1401 | /* Make sure there are no concurrent readers while updating opp_table */ |
1402 | WARN_ON(!list_empty(&opp_table->opp_list)); | |
9f8ea969 | 1403 | |
2c2709dc VK |
1404 | regulator_put(opp_table->regulator); |
1405 | opp_table->regulator = ERR_PTR(-ENXIO); | |
9f8ea969 | 1406 | |
2c2709dc VK |
1407 | /* Try freeing opp_table if this was the last blocking resource */ |
1408 | _remove_opp_table(opp_table); | |
9f8ea969 VK |
1409 | |
1410 | unlock: | |
2c2709dc | 1411 | mutex_unlock(&opp_table_lock); |
9f8ea969 VK |
1412 | } |
1413 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator); | |
1414 | ||
38393409 VK |
/**
 * dev_pm_opp_add() - Add an OPP table from a table definitions
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	/* dynamic=true: NOTE(review) marks API-added OPPs, apparently to
	 * distinguish them from DT-table OPPs — confirm against _opp_add_v1 */
	return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
e1f60b29 NM |
1443 | |
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated (before taking the lock, may sleep) */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&opp_table_lock);

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/* RCU-replace: readers see either the old or the new node */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&opp_table_lock);
	/* old node is freed only after an SRCU grace period */
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_ENABLE, new_opp);
	else
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_DISABLE, new_opp);

	return 0;

unlock:
	mutex_unlock(&opp_table_lock);
	/* no replacement happened; drop the preallocated node */
	kfree(new_opp);
	return r;
}
1525 | ||
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used for users an OPP available
 * after being temporarily made unavailable with dev_pm_opp_disable.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	/* common helper does locking, RCU replace and notification */
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
e1f60b29 NM |
1550 | |
1551 | /** | |
5d4879cd | 1552 | * dev_pm_opp_disable() - Disable a specific OPP |
e1f60b29 NM |
1553 | * @dev: device for which we do this operation |
1554 | * @freq: OPP frequency to disable | |
1555 | * | |
1556 | * Disables a provided opp. If the operation is valid, this returns | |
1557 | * 0, else the corresponding error value. It is meant to be a temporary | |
1558 | * control by users to make this OPP not available until the circumstances are | |
5d4879cd | 1559 | * right to make it available again (with a call to dev_pm_opp_enable). |
e1f60b29 | 1560 | * |
2c2709dc | 1561 | * Locking: The internal opp_table and opp structures are RCU protected. |
e1f60b29 NM |
1562 | * Hence this function indirectly uses RCU and mutex locks to keep the |
1563 | * integrity of the internal data structures. Callers should ensure that | |
1564 | * this function is *NOT* called under RCU protection or in contexts where | |
1565 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | |
984f16c8 NM |
1566 | * |
1567 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | |
e1a2d49c | 1568 | * copy operation, returns 0 if no modification was done OR modification was |
984f16c8 | 1569 | * successful. |
e1f60b29 | 1570 | */ |
5d4879cd | 1571 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) |
e1f60b29 | 1572 | { |
327854c8 | 1573 | return _opp_set_availability(dev, freq, false); |
e1f60b29 | 1574 | } |
5d4879cd | 1575 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); |
e1f60b29 | 1576 | |
03ca370f | 1577 | /** |
5d4879cd | 1578 | * dev_pm_opp_get_notifier() - find notifier_head of the device with opp |
2c2709dc | 1579 | * @dev: device pointer used to lookup OPP table. |
984f16c8 NM |
1580 | * |
1581 | * Return: pointer to notifier head if found, otherwise -ENODEV or | |
1582 | * -EINVAL based on type of error casted as pointer. value must be checked | |
1583 | * with IS_ERR to determine valid pointer or error result. | |
1584 | * | |
2c2709dc VK |
1585 | * Locking: This function must be called under rcu_read_lock(). opp_table is a |
1586 | * RCU protected pointer. The reason for the same is that the opp pointer which | |
1587 | * is returned will remain valid for use with opp_get_{voltage, freq} only while | |
984f16c8 NM |
1588 | * under the locked area. The pointer returned must be used prior to unlocking |
1589 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
03ca370f | 1590 | */ |
5d4879cd | 1591 | struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) |
03ca370f | 1592 | { |
2c2709dc | 1593 | struct opp_table *opp_table = _find_opp_table(dev); |
03ca370f | 1594 | |
2c2709dc VK |
1595 | if (IS_ERR(opp_table)) |
1596 | return ERR_CAST(opp_table); /* matching type */ | |
03ca370f | 1597 | |
2c2709dc | 1598 | return &opp_table->srcu_head; |
03ca370f | 1599 | } |
4679ec37 | 1600 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); |
b496dfbc | 1601 | |
/*
 * Free OPPs either created using static entries present in DT or even the
 * dynamically added entries based on remove_all param.
 *
 * @dev:	 device whose OPP table (or table membership) is torn down
 * @remove_all:	 true  -> remove every OPP (static and dynamic),
 *		 false -> remove only the statically created (non-dynamic) OPPs
 *
 * Runs entirely under opp_table_lock. If the table is shared by several
 * devices, only this device's opp_device entry is unlinked and the OPPs
 * themselves are left untouched for the remaining users.
 */
void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp, *tmp;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		/* A missing table (-ENODEV) is a normal no-op; anything else
		 * indicates a caller bug or corrupted state, so warn loudly. */
		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		goto unlock;
	}

	/* Find if opp_table manages a single device */
	if (list_is_singular(&opp_table->dev_list)) {
		/* Free static OPPs */
		/* Dynamic OPPs are skipped unless remove_all is set.
		 * NOTE(review): _opp_remove(..., true) presumably also frees
		 * the table once its last OPP goes away — confirm it is safe
		 * with list_for_each_entry_safe's saved 'tmp' iterator. */
		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
			if (remove_all || !opp->dynamic)
				_opp_remove(opp_table, opp, true);
		}
	} else {
		/* Table is shared: detach only this device's entry. */
		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
	}

unlock:
	mutex_unlock(&opp_table_lock);
}
129eec55 VK |
1641 | |
1642 | /** | |
411466c5 | 1643 | * dev_pm_opp_remove_table() - Free all OPPs associated with the device |
2c2709dc | 1644 | * @dev: device pointer used to lookup OPP table. |
129eec55 | 1645 | * |
411466c5 SH |
1646 | * Free both OPPs created using static entries present in DT and the |
1647 | * dynamically added entries. | |
984f16c8 | 1648 | * |
2c2709dc | 1649 | * Locking: The internal opp_table and opp structures are RCU protected. |
984f16c8 NM |
1650 | * Hence this function indirectly uses RCU updater strategy with mutex locks |
1651 | * to keep the integrity of the internal data structures. Callers should ensure | |
1652 | * that this function is *NOT* called under RCU protection or in contexts where | |
1653 | * mutex cannot be locked. | |
129eec55 | 1654 | */ |
411466c5 | 1655 | void dev_pm_opp_remove_table(struct device *dev) |
129eec55 | 1656 | { |
411466c5 | 1657 | _dev_pm_opp_remove_table(dev, true); |
8d4d4e98 | 1658 | } |
411466c5 | 1659 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); |