/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "opp.h"
#ifdef CONFIG_CPU_FREQ
29 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
30 * @dev: device for which we do this operation
31 * @table: Cpufreq table returned back to caller
33 * Generate a cpufreq table for a provided device- this assumes that the
34 * opp table is already initialized and ready for usage.
36 * This function allocates required memory for the cpufreq table. It is
37 * expected that the caller does the required maintenance such as freeing
38 * the table as required.
40 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
41 * if no memory available for the operation (table is not populated), returns 0
42 * if successful and table is populated.
44 * WARNING: It is important for the callers to ensure refreshing their copy of
45 * the table if any of the mentioned functions have been invoked in the interim.
47 * Locking: The internal opp_table and opp structures are RCU protected.
48 * Since we just use the regular accessor functions to access the internal data
49 * structures, we use RCU read lock inside this function. As a result, users of
50 * this function DONOT need to use explicit locks for invoking.
52 int dev_pm_opp_init_cpufreq_table(struct device *dev,
53 struct cpufreq_frequency_table **table)
55 struct dev_pm_opp *opp;
56 struct cpufreq_frequency_table *freq_table = NULL;
57 int i, max_opps, ret = 0;
62 max_opps = dev_pm_opp_get_opp_count(dev);
64 ret = max_opps ? max_opps : -ENODATA;
68 freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
74 for (i = 0, rate = 0; i < max_opps; i++, rate++) {
76 opp = dev_pm_opp_find_freq_ceil(dev, &rate);
81 freq_table[i].driver_data = i;
82 freq_table[i].frequency = rate / 1000;
84 /* Is Boost/turbo opp ? */
85 if (dev_pm_opp_is_turbo(opp))
86 freq_table[i].flags = CPUFREQ_BOOST_FREQ;
89 freq_table[i].driver_data = i;
90 freq_table[i].frequency = CPUFREQ_TABLE_END;
92 *table = &freq_table[0];
101 EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
104 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
105 * @dev: device for which we do this operation
106 * @table: table to free
108 * Free up the table allocated by dev_pm_opp_init_cpufreq_table
110 void dev_pm_opp_free_cpufreq_table(struct device *dev,
111 struct cpufreq_frequency_table **table)
119 EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif	/* CONFIG_CPU_FREQ */
123 * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs
124 * @cpu_dev: CPU device for which we do this operation
125 * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
127 * This marks OPP table of the @cpu_dev as shared by the CPUs present in
130 * Returns -ENODEV if OPP table isn't already present.
132 * Locking: The internal opp_table and opp structures are RCU protected.
133 * Hence this function internally uses RCU updater strategy with mutex locks
134 * to keep the integrity of the internal data structures. Callers should ensure
135 * that this function is *NOT* called under RCU protection or in contexts where
136 * mutex cannot be locked.
138 int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
140 struct opp_device *opp_dev;
141 struct opp_table *opp_table;
145 mutex_lock(&opp_table_lock);
147 opp_table = _find_opp_table(cpu_dev);
148 if (IS_ERR(opp_table)) {
149 ret = PTR_ERR(opp_table);
153 for_each_cpu(cpu, cpumask) {
154 if (cpu == cpu_dev->id)
157 dev = get_cpu_device(cpu);
159 dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
164 opp_dev = _add_opp_dev(dev, opp_table);
166 dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
172 mutex_unlock(&opp_table_lock);
176 EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
180 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
181 * @cpumask: cpumask for which OPP table needs to be removed
183 * This removes the OPP tables for CPUs present in the @cpumask.
185 * Locking: The internal opp_table and opp structures are RCU protected.
186 * Hence this function internally uses RCU updater strategy with mutex locks
187 * to keep the integrity of the internal data structures. Callers should ensure
188 * that this function is *NOT* called under RCU protection or in contexts where
189 * mutex cannot be locked.
191 void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
193 struct device *cpu_dev;
196 WARN_ON(cpumask_empty(cpumask));
198 for_each_cpu(cpu, cpumask) {
199 cpu_dev = get_cpu_device(cpu);
201 pr_err("%s: failed to get cpu%d device\n", __func__,
206 dev_pm_opp_of_remove_table(cpu_dev);
209 EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
212 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
213 * @cpumask: cpumask for which OPP table needs to be added.
215 * This adds the OPP tables for CPUs present in the @cpumask.
217 * Locking: The internal opp_table and opp structures are RCU protected.
218 * Hence this function internally uses RCU updater strategy with mutex locks
219 * to keep the integrity of the internal data structures. Callers should ensure
220 * that this function is *NOT* called under RCU protection or in contexts where
221 * mutex cannot be locked.
223 int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
225 struct device *cpu_dev;
228 WARN_ON(cpumask_empty(cpumask));
230 for_each_cpu(cpu, cpumask) {
231 cpu_dev = get_cpu_device(cpu);
233 pr_err("%s: failed to get cpu%d device\n", __func__,
238 ret = dev_pm_opp_of_add_table(cpu_dev);
240 pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
243 /* Free all other OPPs */
244 dev_pm_opp_of_cpumask_remove_table(cpumask);
251 EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
254 * Works only for OPP v2 bindings.
256 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
259 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
260 * @cpu_dev using operating-points-v2
263 * @cpu_dev: CPU device for which we do this operation
264 * @cpumask: cpumask to update with information of sharing CPUs
266 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
268 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
270 * Locking: The internal opp_table and opp structures are RCU protected.
271 * Hence this function internally uses RCU updater strategy with mutex locks
272 * to keep the integrity of the internal data structures. Callers should ensure
273 * that this function is *NOT* called under RCU protection or in contexts where
274 * mutex cannot be locked.
276 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
278 struct device_node *np, *tmp_np;
279 struct device *tcpu_dev;
282 /* Get OPP descriptor node */
283 np = _of_get_opp_desc_node(cpu_dev);
285 dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
289 cpumask_set_cpu(cpu_dev->id, cpumask);
291 /* OPPs are shared ? */
292 if (!of_property_read_bool(np, "opp-shared"))
295 for_each_possible_cpu(cpu) {
296 if (cpu == cpu_dev->id)
299 tcpu_dev = get_cpu_device(cpu);
301 dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
307 /* Get OPP descriptor node */
308 tmp_np = _of_get_opp_desc_node(tcpu_dev);
310 dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
316 /* CPUs are sharing opp node */
318 cpumask_set_cpu(cpu, cpumask);
327 EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);