PM / OPP: Add dev_pm_opp_get_sharing_cpus()
drivers/base/power/opp/cpu.c
/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "opp.h"

#ifdef CONFIG_CPU_FREQ

/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	cpufreq table returned back to the caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * OPP table is already initialized and ready for usage.
 *
 * This function allocates the required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (table is not
 * populated), and 0 if successful (table is populated).
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any OPP modifying functions have been invoked in the interim.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Since we just use the regular accessor functions to access the internal data
 * structures, we use the RCU read lock inside this function. As a result,
 * users of this function do not need to use explicit locks for invoking it.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = NULL;
	int i, max_opps, ret = 0;
	unsigned long rate;

	rcu_read_lock();

	max_opps = dev_pm_opp_get_opp_count(dev);
	if (max_opps <= 0) {
		ret = max_opps ? max_opps : -ENODATA;
		goto out;
	}

	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
	if (!freq_table) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
		/* find next rate */
		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			goto out;
		}
		freq_table[i].driver_data = i;
		freq_table[i].frequency = rate / 1000;

		/* Is this a boost/turbo OPP? */
		if (dev_pm_opp_is_turbo(opp))
			freq_table[i].flags = CPUFREQ_BOOST_FREQ;
	}

	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

out:
	rcu_read_unlock();
	if (ret)
		kfree(freq_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);

/**
 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by dev_pm_opp_init_cpufreq_table().
 */
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
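
/*
 * Usage sketch (hypothetical driver code, not part of this file): a cpufreq
 * driver's ->init() callback typically pairs dev_pm_opp_init_cpufreq_table()
 * with dev_pm_opp_free_cpufreq_table(). example_cpufreq_init() is a made-up
 * name; the OPP table is assumed to be registered already (from DT or via
 * dev_pm_opp_add()), and the table is handed to the cpufreq core with
 * cpufreq_table_validate_and_show() as it existed around this kernel version.
 */
static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct cpufreq_frequency_table *freq_table;
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Build a cpufreq table from the device's registered OPPs */
	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret)
		return ret;

	/* Hand the table over to the cpufreq core */
	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret)
		dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);

	return ret;
}
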
#endif	/* CONFIG_CPU_FREQ */

#ifdef CONFIG_OF
/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
{
	struct device *cpu_dev;
	int cpu;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		dev_pm_opp_of_remove_table(cpu_dev);
	}
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be added
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
{
	struct device *cpu_dev;
	int cpu, ret = 0;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
			       __func__, cpu, ret);

			/* Free all other OPPs */
			dev_pm_opp_of_cpumask_remove_table(cpumask);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
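
/*
 * Usage sketch (hypothetical, not part of this file): a platform driver can
 * populate the OPP tables for every CPU it manages in one call, and tear them
 * all down again on its error/remove path. example_setup_opp_tables() is a
 * made-up name.
 */
static int example_setup_opp_tables(void)
{
	cpumask_var_t cpus;
	int ret;

	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus, cpu_possible_mask);

	/* Parse the DT operating points for every CPU in the mask */
	ret = dev_pm_opp_of_cpumask_add_table(cpus);
	if (ret)
		goto out;

	/* ... register the cpufreq driver, etc ... */

	/* Later, on the teardown path, drop all the tables again */
	dev_pm_opp_of_cpumask_remove_table(cpus);

out:
	free_cpumask_var(cpus);
	return ret;
}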

/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *				      @cpu_dev using operating-points-v2
 *				      bindings.
 *
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 * It works only for operating-points-v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
	struct device_node *np, *tmp_np;
	struct device *tcpu_dev;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = _of_get_opp_desc_node(cpu_dev);
	if (!np) {
		dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
		return -ENOENT;
	}

	cpumask_set_cpu(cpu_dev->id, cpumask);

	/* OPPs shared? */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			ret = -ENODEV;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _of_get_opp_desc_node(tcpu_dev);
		if (!tmp_np) {
			dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
				__func__);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* CPUs are sharing opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
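
/*
 * Usage sketch (hypothetical, not part of this file): a DT-based cpufreq
 * driver's ->init() callback can use this helper to fill policy->cpus from
 * the "opp-shared" property. example_of_policy_init() is a made-up name.
 */
static int example_of_policy_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret == -ENOENT) {
		/*
		 * No operating-points-v2 table: the driver must get the
		 * sharing information some other way (platform data, etc).
		 */
		ret = 0;
	}

	return ret;
}
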
#endif	/* CONFIG_OF */

/**
 * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by several CPUs
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask of the CPUs which share the OPP table with @cpu_dev
 *
 * This marks the OPP table of @cpu_dev as shared by the CPUs present in
 * @cpumask.
 *
 * Returns -ENODEV if the OPP table isn't already present.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
				const cpumask_var_t cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	struct device *dev;
	int cpu, ret = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table)) {
		ret = PTR_ERR(opp_table);
		goto unlock;
	}

	for_each_cpu(cpu, cpumask) {
		if (cpu == cpu_dev->id)
			continue;

		dev = get_cpu_device(cpu);
		if (!dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			continue;
		}

		opp_dev = _add_opp_dev(dev, opp_table);
		if (!opp_dev) {
			dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
				__func__, cpu);
			continue;
		}

		/* Mark the opp-table as shared by multiple CPUs now */
		opp_table->shared_opp = true;
	}
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
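
/*
 * Usage sketch (hypothetical, not part of this file): when OPPs are created
 * dynamically with dev_pm_opp_add() (so there is no DT table to describe
 * sharing), the driver marks the sharing CPUs by hand. example_mark_shared()
 * and the two frequency/voltage pairs are made up.
 */
static int example_mark_shared(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Register a couple of OPPs for this CPU: 500 MHz and 1 GHz */
	ret = dev_pm_opp_add(cpu_dev, 500000000, 900000);
	if (!ret)
		ret = dev_pm_opp_add(cpu_dev, 1000000000, 1100000);
	if (ret)
		return ret;

	/* All CPUs in policy->cpus use the table we just created */
	return dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
}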

/**
 * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENODEV if the OPP table isn't already present.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	int ret = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table)) {
		ret = PTR_ERR(opp_table);
		goto unlock;
	}

	cpumask_clear(cpumask);

	if (opp_table->shared_opp) {
		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
			cpumask_set_cpu(opp_dev->dev->id, cpumask);
	} else {
		cpumask_set_cpu(cpu_dev->id, cpumask);
	}

unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
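
/*
 * Usage sketch (hypothetical, not part of this file): once sharing has been
 * established (from DT or via dev_pm_opp_set_sharing_cpus()), a driver can
 * recover the mask later, e.g. to fill policy->cpus in its ->init() callback.
 * example_fill_policy_cpus() is a made-up name.
 */
static int example_fill_policy_cpus(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	if (!cpu_dev)
		return -ENODEV;

	/* Copy the set of CPUs sharing cpu_dev's OPP table into the policy */
	return dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
}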