PM / OPP: Move CONFIG_OF dependent code in a separate file
[linux-2.6-block.git] drivers/base/power/opp/of.c
/*
 * Generic OPP OF helpers
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>

#include "opp.h"

static struct opp_table *_managed_opp(const struct device_node *np)
{
	struct opp_table *opp_table;

	list_for_each_entry_rcu(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have the same node-pointer, np.
			 *
			 * But the OPPs will be considered shared only if the
			 * OPP table contains an "opp-shared" property.
			 */
			return opp_table->shared_opp ? opp_table : NULL;
		}
	}

	return NULL;
}
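
/*
 * For illustration only (not part of the original file): a minimal DT sketch
 * of a table that _managed_opp() would treat as shared. Labels and node names
 * other than the "operating-points-v2" and "opp-shared" properties are made
 * up.
 *
 *	cpu0_opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *		opp-shared;
 *	};
 *
 *	cpu@0 { operating-points-v2 = <&cpu0_opp_table>; };
 *	cpu@1 { operating-points-v2 = <&cpu0_opp_table>; };
 *
 * Both CPUs reference the same table node, so they end up sharing a single
 * opp_table.
 */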

void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
{
	struct device_node *np;

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (np) {
		u32 val;

		if (!of_property_read_u32(np, "clock-latency", &val))
			opp_table->clock_latency_ns_max = val;
		of_property_read_u32(np, "voltage-tolerance",
				     &opp_table->voltage_tolerance_v1);
		of_node_put(np);
	}
}
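
/*
 * For illustration only (not part of the original file): the v1 properties
 * read above, as they might appear in a device node. The values shown are
 * made up; clock-latency is in nanoseconds and voltage-tolerance is in
 * percent.
 *
 *	cpu@0 {
 *		clock-latency = <300000>;
 *		voltage-tolerance = <2>;
 *	};
 */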

static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
			      struct device_node *np)
{
	unsigned int count = opp_table->supported_hw_count;
	u32 version;
	int ret;

	if (!opp_table->supported_hw)
		return true;

	while (count--) {
		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
						 &version);
		if (ret) {
			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
				 __func__, count, ret);
			return false;
		}

		/* Both of these are bitwise masks of the versions */
		if (!(version & opp_table->supported_hw[count]))
			return false;
	}

	return true;
}
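
/*
 * For illustration only (not part of the original file): a sketch of how the
 * check above behaves. Suppose platform code set supported_hw to { 0x3, 0xF }
 * (supported_hw_count == 2), e.g. via dev_pm_opp_set_supported_hw(). Then an
 * OPP node carrying
 *
 *	opp-supported-hw = <0x1 0x4>;
 *
 * is accepted, since 0x1 & 0x3 and 0x4 & 0xF are both non-zero, while
 *
 *	opp-supported-hw = <0x4 0x4>;
 *
 * is rejected because 0x4 & 0x3 == 0 for the first cell.
 */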

/* TODO: Support multiple regulators */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
{
	u32 microvolt[3] = {0};
	u32 val;
	int count, ret;
	struct property *prop = NULL;
	char name[NAME_MAX];

	/* Search for "opp-microvolt-<name>" */
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microvolt-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microvolt" */
		sprintf(name, "opp-microvolt");
		prop = of_find_property(opp->np, name, NULL);

		/* Missing property isn't a problem, but an invalid entry is */
		if (!prop)
			return 0;
	}

	count = of_property_count_u32_elems(opp->np, name);
	if (count < 0) {
		dev_err(dev, "%s: Invalid %s property (%d)\n",
			__func__, name, count);
		return count;
	}

	/* There can be one or three elements here */
	if (count != 1 && count != 3) {
		dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
			__func__, name, count);
		return -EINVAL;
	}

	ret = of_property_read_u32_array(opp->np, name, microvolt, count);
	if (ret) {
		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
		return -EINVAL;
	}

	opp->u_volt = microvolt[0];

	if (count == 1) {
		opp->u_volt_min = opp->u_volt;
		opp->u_volt_max = opp->u_volt;
	} else {
		opp->u_volt_min = microvolt[1];
		opp->u_volt_max = microvolt[2];
	}

	/* Search for "opp-microamp-<name>" */
	prop = NULL;
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microamp-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microamp" */
		sprintf(name, "opp-microamp");
		prop = of_find_property(opp->np, name, NULL);
	}

	if (prop && !of_property_read_u32(opp->np, name, &val))
		opp->u_amp = val;

	return 0;
}
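
/*
 * For illustration only (not part of the original file): the two supply
 * layouts accepted above, with made-up values. A single cell gives the
 * target voltage; three cells give <target min max>, all in microvolts.
 *
 *	opp-microvolt = <970000>;
 *	opp-microvolt = <970000 960000 975000>;
 *	opp-microamp = <70000>;
 *
 * When dev_pm_opp_set_prop_name() has been used, the name-qualified variants
 * (e.g. "opp-microvolt-<name>") are tried first, as the code above shows.
 */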

/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *				  entries
 * @dev: device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	_dev_pm_opp_remove_table(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *_of_get_opp_desc_node(struct device *dev)
{
	/*
	 * TODO: Support for multiple OPP tables.
	 *
	 * There should be only ONE phandle present in "operating-points-v2"
	 * property.
	 */

	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
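
/*
 * For illustration only (not part of the original file): a device node
 * pointing at its OPP descriptor via a single phandle, as expected by
 * _of_get_opp_desc_node(). The label and node name are made up.
 *
 *	cpu@0 {
 *		operating-points-v2 = <&cpu0_opp_table>;
 *	};
 */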

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev: device for which we do this operation
 * @np: device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are the same) and opp->available
 * -EEXIST	Freq is the same and volt is different OR
 *		Duplicate OPPs (both freq and volt are the same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/* Check whether the OPP supports the hardware's hierarchy of versions */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in the clk API, so cast
	 * explicitly to its type. This must be fixed once rate is guaranteed
	 * to be 64 bit in the clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_opp;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, opp_table->suspend_opp->rate,
				 new_opp->rate);
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&opp_table_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}
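
/*
 * For illustration only (not part of the original file): one OPP entry with
 * the v2 properties parsed above (frequency, supplies, latency and the
 * optional suspend flag). All values are made up.
 *
 *	opp@1000000000 {
 *		opp-hz = /bits/ 64 <1000000000>;
 *		opp-microvolt = <970000 960000 975000>;
 *		opp-microamp = <70000>;
 *		clock-latency-ns = <300000>;
 *		opp-suspend;
 *	};
 */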

/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
	struct device_node *np;
	struct opp_table *opp_table;
	int ret = 0, count = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _managed_opp(opp_np);
	if (opp_table) {
		/* OPPs are already managed */
		if (!_add_opp_dev(dev, opp_table))
			ret = -ENOMEM;
		mutex_unlock(&opp_table_lock);
		return ret;
	}
	mutex_unlock(&opp_table_lock);

	/* We now have the opp-table node, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			goto free_table;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count))
		return -ENOENT;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(dev);
	if (WARN_ON(IS_ERR(opp_table))) {
		ret = PTR_ERR(opp_table);
		mutex_unlock(&opp_table_lock);
		goto free_table;
	}

	opp_table->np = opp_np;
	opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");

	mutex_unlock(&opp_table_lock);

	return 0;

free_table:
	dev_pm_opp_of_remove_table(dev);

	return ret;
}

/* Initializes OPP tables based on the old, deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP table\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		if (_opp_add_v1(dev, freq, volt, false))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}
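
/*
 * For illustration only (not part of the original file): the v1 table format
 * parsed above, i.e. flat <frequency-kHz voltage-uV> pairs. The values are
 * made up; note the code converts kHz to Hz when adding each OPP.
 *
 *	operating-points = <
 *		792000 1100000
 *		396000  950000
 *	>;
 */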

/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev: device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for the given device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are the same) and opp->available
 * -EEXIST	Freq is the same and volt is different OR
 *		Duplicate OPPs (both freq and volt are the same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or contains invalid
 *		data in the device node.
 * -ENODATA	when an empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in the opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np;
	int ret;

	/*
	 * OPPs have two versions of bindings now. The older one is deprecated,
	 * so try the new binding first.
	 */
	opp_np = _of_get_opp_desc_node(dev);
	if (!opp_np) {
		/*
		 * Try the old, deprecated bindings for backward compatibility
		 * with older dtbs.
		 */
		return _of_add_opp_table_v1(dev);
	}

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
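
/*
 * For illustration only (not part of the original file): a minimal sketch of
 * how a driver might pair these helpers in its probe/remove paths. The
 * "foo_probe"/"foo_remove" names are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_opp_of_add_table(&pdev->dev);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_opp_of_remove_table(&pdev->dev);
 *		return 0;
 *	}
 */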

/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask: cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask: cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
	struct device *cpu_dev;
	int cpu, ret = 0;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
			       __func__, cpu, ret);

			/* Free all other OPPs */
			dev_pm_opp_of_cpumask_remove_table(cpumask);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
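
/*
 * For illustration only (not part of the original file): a minimal sketch of
 * registering and dropping OPP tables for a set of CPUs, e.g. from a cpufreq
 * driver's init/exit paths. The "shared_cpus" mask is assumed to have been
 * filled in already, for instance by dev_pm_opp_of_get_sharing_cpus() below.
 *
 *	ret = dev_pm_opp_of_cpumask_add_table(shared_cpus);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	dev_pm_opp_of_cpumask_remove_table(shared_cpus);
 */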

/*
 * Works only for OPP v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *				      @cpu_dev using operating-points-v2
 *				      bindings.
 *
 * @cpu_dev: CPU device for which we do this operation
 * @cpumask: cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
				   struct cpumask *cpumask)
{
	struct device_node *np, *tmp_np;
	struct device *tcpu_dev;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = _of_get_opp_desc_node(cpu_dev);
	if (!np) {
		dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
		return -ENOENT;
	}

	cpumask_set_cpu(cpu_dev->id, cpumask);

	/* Are the OPPs shared? */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			ret = -ENODEV;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _of_get_opp_desc_node(tcpu_dev);
		if (!tmp_np) {
			dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
				__func__);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* CPUs share the same opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
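
/*
 * For illustration only (not part of the original file): a minimal sketch of
 * discovering which CPUs share OPPs with a given CPU device. "policy_cpus"
 * is a hypothetical destination mask; the function only sets bits, so the
 * caller clears the mask first.
 *
 *	cpumask_clear(policy_cpus);
 *	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy_cpus);
 *	if (ret == -ENOENT)
 *		pr_debug("no operating-points-v2 table for this CPU\n");
 */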