/*
 * ARM big.LITTLE Platforms CPUFreq support
 *
 * Copyright (C) 2013 ARM Ltd.
 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
#include <asm/bL_switcher.h>

#include "arm_big_little.h"

/* Currently we support only two clusters */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2

#ifdef CONFIG_BL_SWITCHER
static bool bL_switching_enabled;
#define is_bL_switching_enabled()	bL_switching_enabled
#define set_switching_enabled(x)	(bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled()	false
#define set_switching_enabled(x)	do { } while (0)
#endif

#define ACTUAL_FREQ(cluster, freq)  ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)    ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
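
/*
 * With switching enabled both clusters are folded onto one virtual
 * frequency scale: A7 rates are halved, so e.g. an A7 physically
 * clocked at 1000000 kHz is reported as a virtual 500000 kHz, and a
 * virtual request of 500000 kHz on the A7 is programmed as a physical
 * 1000000 kHz. This roughly tracks the clusters' relative per-clock
 * performance.
 */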

static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
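
/*
 * The extra MAX_CLUSTERS slot in freq_table[] and cluster_usage[]
 * above serves the merged "virtual cluster" used while the bL
 * switcher is enabled.
 */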

static unsigned int clk_big_min;	/* Minimum clock frequency (Big) */
static unsigned int clk_little_max;	/* Maximum clock frequency (Little) */

static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);

static struct mutex cluster_lock[MAX_CLUSTERS];

static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}

static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}

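/*
 * CPUs in one physical cluster share a clock, so the cluster has to
 * run at the highest frequency requested by any of its online CPUs.
 */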
static unsigned int find_cluster_maxfreq(int cluster)
{
	int j;
	u32 max_freq = 0, cpu_freq;

	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);

		if ((cluster == per_cpu(physical_cluster, j)) &&
				(max_freq < cpu_freq))
			max_freq = cpu_freq;
	}

	pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
			max_freq);

	return max_freq;
}

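/*
 * Under the switcher the hardware clock reflects the whole virtual
 * cluster, so the driver reports the per-CPU value cached in
 * cpu_last_req_freq instead of reading the clock directly.
 */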
static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu);
	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		rate = VIRT_FREQ(cur_cluster, rate);

	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
			cur_cluster, rate);

	return rate;
}

static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
{
	if (is_bL_switching_enabled()) {
		pr_debug("%s: freq: %d\n", __func__,
				per_cpu(cpu_last_req_freq, cpu));

		return per_cpu(cpu_last_req_freq, cpu);
	} else {
		return clk_get_cpu_rate(cpu);
	}
}

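/*
 * Program the clock of @new_cluster on behalf of @cpu. Under the
 * switcher the per-CPU request is recorded first, the cluster is run
 * at the max of its CPUs' requests, and the previous values are
 * restored if clk_set_rate() fails. Moving to a different cluster
 * additionally triggers a switch request and re-evaluates the old
 * cluster's rate.
 */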
static int
bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
			__func__, cpu, old_cluster, new_cluster, new_rate);

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (WARN_ON(ret)) {
		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
				new_cluster);
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
				__func__, cpu, old_cluster, new_cluster);

		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
					__func__, old_cluster, new_rate);

			ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
			if (ret)
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
						__func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}

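/*
 * When switching is enabled, pick the physical cluster that can
 * honour the requested virtual frequency: targets below clk_big_min
 * migrate the CPU to the A7 cluster, targets above clk_little_max
 * migrate it to the A15 cluster.
 */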
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
		unsigned int index)
{
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	unsigned int freqs_new;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs_new = freq_table[cur_cluster][index].frequency;

	if (is_bL_switching_enabled()) {
		if ((actual_cluster == A15_CLUSTER) &&
				(freqs_new < clk_big_min)) {
			new_cluster = A7_CLUSTER;
		} else if ((actual_cluster == A7_CLUSTER) &&
				(freqs_new > clk_little_max)) {
			new_cluster = A15_CLUSTER;
		}
	}

	return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
}

static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
	int count;

	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
		;

	return count;
}

/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	u32 min_freq = ~0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;

	return min_freq;
}

/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	u32 max_freq = 0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency > max_freq)
			max_freq = pos->frequency;

	return max_freq;
}

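/*
 * Build the virtual cluster's table by concatenating the per-cluster
 * tables on the virtual scale. With hypothetical tables of A7
 * {700000, 1000000} kHz and A15 {500000, 1200000} kHz, the merged
 * result is {350000, 500000, 500000, 1200000} kHz: A7 entries are
 * copied first (halved), so frequencies come out in increasing order.
 */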
static int merge_cluster_tables(void)
{
	int i, j, k = 0, count = 1;
	struct cpufreq_frequency_table *table;

	for (i = 0; i < MAX_CLUSTERS; i++)
		count += get_table_count(freq_table[i]);

	table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	freq_table[MAX_CLUSTERS] = table;

	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
				j++) {
			table[k].frequency = VIRT_FREQ(i,
					freq_table[i][j].frequency);
			pr_debug("%s: index: %d, freq: %d\n", __func__, k,
					table[k].frequency);
			k++;
		}
	}

	table[k].driver_data = k;
	table[k].frequency = CPUFREQ_TABLE_END;

	pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);

	return 0;
}

static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);

	if (!freq_table[cluster])
		return;

	clk_put(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
	dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}

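/*
 * cluster_usage[] refcounts each (possibly virtual) cluster's clock
 * and OPP data. The virtual cluster pins the data of every physical
 * cluster, so its setup and teardown walk all present CPUs.
 */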
static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i;

	if (atomic_dec_return(&cluster_usage[cluster]))
		return;

	if (cluster < MAX_CLUSTERS)
		return _put_cluster_clk_and_freq_table(cpu_dev);

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return;
		}

		_put_cluster_clk_and_freq_table(cdev);
	}

	/* free virtual table */
	kfree(freq_table[cluster]);
}

static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
	char name[14] = "cpu-cluster.";	/* 12 chars + cluster digit + NUL */
	int ret;

	if (freq_table[cluster])
		return 0;

	ret = arm_bL_ops->init_opp_table(cpu_dev);
	if (ret) {
		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto out;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto out;
	}

	name[12] = cluster + '0';
	clk[cluster] = clk_get(cpu_dev, name);
	if (!IS_ERR(clk[cluster])) {
		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
				__func__, clk[cluster], freq_table[cluster],
				cluster);
		return 0;
	}

	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
			__func__, cpu_dev->id, cluster);
	ret = PTR_ERR(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

out:
	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
			cluster);
	return ret;
}

static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i, ret;

	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
		return 0;

	if (cluster < MAX_CLUSTERS) {
		ret = _get_cluster_clk_and_freq_table(cpu_dev);
		if (ret)
			atomic_dec(&cluster_usage[cluster]);
		return ret;
	}

	/*
	 * Get data for all clusters and fill virtual cluster with a merge of
	 * both
	 */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		ret = _get_cluster_clk_and_freq_table(cdev);
		if (ret)
			goto put_clusters;
	}

	ret = merge_cluster_tables();
	if (ret)
		goto put_clusters;

	/* Assuming 2 clusters, set clk_big_min and clk_little_max */
	clk_big_min = get_table_min(freq_table[0]);
	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));

	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
			__func__, cluster, clk_big_min, clk_little_max);

	return 0;

put_clusters:
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		_put_cluster_clk_and_freq_table(cdev);
	}

	atomic_dec(&cluster_usage[cluster]);

	return ret;
}

/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
				policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev);
		return ret;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	if (arm_bL_ops->get_transition_latency)
		policy->cpuinfo.transition_latency =
			arm_bL_ops->get_transition_latency(cpu_dev);
	else
		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) =
			clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}

static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	put_cluster_clk_and_freq_table(cpu_dev);
	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);

	return 0;
}

static struct cpufreq_driver bL_cpufreq_driver = {
	.name			= "arm-big-little",
	.flags			= CPUFREQ_STICKY |
					CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
					CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify			= cpufreq_generic_frequency_table_verify,
	.target_index		= bL_cpufreq_set_target,
	.get			= bL_cpufreq_get_rate,
	.init			= bL_cpufreq_init,
	.exit			= bL_cpufreq_exit,
	.attr			= cpufreq_generic_attr,
};

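/*
 * The driver is unregistered before a switcher state change and
 * re-registered afterwards, so cpufreq policies and frequency tables
 * are rebuilt against the new (virtual or per-cluster) topology.
 */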
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
					unsigned long action, void *_arg)
{
	pr_debug("%s: action: %ld\n", __func__, action);

	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		cpufreq_unregister_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_ENABLE:
		set_switching_enabled(true);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_DISABLE:
		set_switching_enabled(false);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block bL_switcher_notifier = {
	.notifier_call = bL_cpufreq_switcher_notifier,
};

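/*
 * bL_switcher_get_enabled() returns the switcher state and holds off
 * switcher transitions until bL_switcher_put_enabled(), keeping the
 * state stable while the driver is (un)registered.
 */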
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
	int ret, i;

	if (arm_bL_ops) {
		pr_debug("%s: Already registered: %s, exiting\n", __func__,
				arm_bL_ops->name);
		return -EBUSY;
	}

	if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
		pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
		return -ENODEV;
	}

	arm_bL_ops = ops;

	ret = bL_switcher_get_enabled();
	set_switching_enabled(ret);

	for (i = 0; i < MAX_CLUSTERS; i++)
		mutex_init(&cluster_lock[i]);

	ret = cpufreq_register_driver(&bL_cpufreq_driver);
	if (ret) {
		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
				__func__, ops->name, ret);
		arm_bL_ops = NULL;
	} else {
		ret = bL_switcher_register_notifier(&bL_switcher_notifier);
		if (ret) {
			cpufreq_unregister_driver(&bL_cpufreq_driver);
			arm_bL_ops = NULL;
		} else {
			pr_info("%s: Registered platform driver: %s\n",
					__func__, ops->name);
		}
	}

	bL_switcher_put_enabled();
	return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);

void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
{
	if (arm_bL_ops != ops) {
		pr_err("%s: Registered with: %s, can't unregister, exiting\n",
				__func__, arm_bL_ops->name);
		return;
	}

	bL_switcher_get_enabled();
	bL_switcher_unregister_notifier(&bL_switcher_notifier);
	cpufreq_unregister_driver(&bL_cpufreq_driver);
	bL_switcher_put_enabled();
	pr_info("%s: Un-registered platform driver: %s\n", __func__,
			arm_bL_ops->name);
	arm_bL_ops = NULL;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);