// SPDX-License-Identifier: GPL-2.0
/*
 * Versatile Express SPC CPUFreq Interface driver
 *
 * Copyright (C) 2013 - 2019 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>

/* Currently we support only two clusters */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2

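/*
 * The big.LITTLE switcher (bL) pairs one big and one LITTLE CPU into a
 * single logical CPU. When CONFIG_BL_SWITCHER is disabled, its hooks
 * compile away to no-ops and the driver runs in plain per-cluster mode.
 */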
#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled()	bL_switching_enabled
#define set_switching_enabled(x)	(bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled()	false
#define set_switching_enabled(x)	do { } while (0)
#define bL_switch_request(...)		do { } while (0)
#define bL_switcher_put_enabled()	do { } while (0)
#define bL_switcher_get_enabled()	do { } while (0)
#endif

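/*
 * In switcher mode both clusters share one virtual frequency table, with
 * the A7 (LITTLE) frequencies presented at half their physical rate:
 * ACTUAL_FREQ() converts a virtual frequency back to the physical one and
 * VIRT_FREQ() does the reverse. A15 frequencies pass through unchanged.
 */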
#define ACTUAL_FREQ(cluster, freq)  ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)    ((cluster == A7_CLUSTER) ? freq >> 1 : freq)

static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];

static unsigned int clk_big_min;	/* Minimum clock frequency (big) */
static unsigned int clk_little_max;	/* Maximum clock frequency (LITTLE) */

static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);

static struct mutex cluster_lock[MAX_CLUSTERS];

static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}

static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
	       MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}

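/*
 * Return the highest frequency last requested by any online CPU currently
 * resident on the given physical cluster. CPUs on one cluster share a
 * clock, so the cluster must run at the fastest outstanding request.
 */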
static unsigned int find_cluster_maxfreq(int cluster)
{
	int j;
	u32 max_freq = 0, cpu_freq;

	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);

		if (cluster == per_cpu(physical_cluster, j) &&
		    max_freq < cpu_freq)
			max_freq = cpu_freq;
	}

	return max_freq;
}

static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu);
	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		rate = VIRT_FREQ(cur_cluster, rate);

	return rate;
}

static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
{
	if (is_bL_switching_enabled())
		return per_cpu(cpu_last_req_freq, cpu);
	else
		return clk_get_cpu_rate(cpu);
}

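/*
 * Set the rate (in kHz) for a CPU: under bL switching, record the per-CPU
 * request, raise the new cluster's shared clock to the highest outstanding
 * request, then issue the cluster switch and rebalance the old cluster's
 * clock if the CPU migrated between clusters.
 */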
static unsigned int
ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (!ret) {
		/*
		 * FIXME: clk_set_rate hasn't returned an error here however it
		 * may be that clk_change_rate failed due to hardware or
		 * firmware issues and wasn't able to report that due to the
		 * current design of the clk core layer. To work around this
		 * problem we will read back the clock rate and check it is
		 * correct. This needs to be removed once clk core is fixed.
		 */
		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
			ret = -EIO;
	}

	if (WARN_ON(ret)) {
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
			if (ret)
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
				       __func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}

/* Set clock frequency */
static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
				     unsigned int index)
{
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	unsigned int freqs_new;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs_new = freq_table[cur_cluster][index].frequency;

	if (is_bL_switching_enabled()) {
		if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
			new_cluster = A7_CLUSTER;
		else if (actual_cluster == A7_CLUSTER &&
			 freqs_new > clk_little_max)
			new_cluster = A15_CLUSTER;
	}

	return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
				       freqs_new);
}

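/* Count the entries in a frequency table, excluding the terminator */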
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
	int count;

	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
		;

	return count;
}

/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	u32 min_freq = ~0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;
	return min_freq;
}

/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	u32 max_freq = 0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency > max_freq)
			max_freq = pos->frequency;
	return max_freq;
}

static bool search_frequency(struct cpufreq_frequency_table *table, int size,
			     unsigned int freq)
{
	int count;

	for (count = 0; count < size; count++) {
		if (table[count].frequency == freq)
			return true;
	}

	return false;
}

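/*
 * Build the merged table used as the virtual cluster's frequency table:
 * walk the clusters in reverse order so the halved A7 frequencies come out
 * ascending, and drop A15 entries that duplicate an already-merged one.
 */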
static int merge_cluster_tables(void)
{
	int i, j, k = 0, count = 1;
	struct cpufreq_frequency_table *table;

	for (i = 0; i < MAX_CLUSTERS; i++)
		count += get_table_count(freq_table[i]);

	table = kcalloc(count, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	freq_table[MAX_CLUSTERS] = table;

	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
		     j++) {
			if (i == A15_CLUSTER &&
			    search_frequency(table, count, freq_table[i][j].frequency))
				continue; /* skip duplicates */
			table[k++].frequency =
				VIRT_FREQ(i, freq_table[i][j].frequency);
		}
	}

	table[k].driver_data = k;
	table[k].frequency = CPUFREQ_TABLE_END;

	return 0;
}

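/* Release the clock and OPP-derived frequency table of one physical cluster */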
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
					    const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);

	if (!freq_table[cluster])
		return;

	clk_put(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
}

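/*
 * Drop a usage reference; the last user tears the cluster down. For the
 * virtual cluster this releases both physical clusters and the merged table.
 */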
static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i;

	if (atomic_dec_return(&cluster_usage[cluster]))
		return;

	if (cluster < MAX_CLUSTERS)
		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev)
			return;

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	/* free virtual table */
	kfree(freq_table[cluster]);
}

static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
	int ret;

	if (freq_table[cluster])
		return 0;

	/*
	 * The platform-specific SPC code must initialise the OPP table, so
	 * just check that the OPP count is non-zero.
	 */
	if (dev_pm_opp_get_opp_count(cpu_dev) <= 0) {
		ret = -ENODEV;
		goto out;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (ret)
		goto out;

	clk[cluster] = clk_get(cpu_dev, NULL);
	if (!IS_ERR(clk[cluster]))
		return 0;

	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
		__func__, cpu_dev->id, cluster);
	ret = PTR_ERR(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

out:
	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
		cluster);
	return ret;
}

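/*
 * Take a usage reference; only the first user does the real work. For the
 * virtual cluster, fetch clocks and tables for both physical clusters and
 * derive the merged table plus the clk_big_min/clk_little_max thresholds.
 */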
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
					  const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i, ret;

	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
		return 0;

	if (cluster < MAX_CLUSTERS) {
		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
		if (ret)
			atomic_dec(&cluster_usage[cluster]);
		return ret;
	}

	/*
	 * Get data for all clusters and fill virtual cluster with a merge of
	 * both
	 */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			ret = -ENODEV;
			goto put_clusters;
		}

		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
		if (ret)
			goto put_clusters;
	}

	ret = merge_cluster_tables();
	if (ret)
		goto put_clusters;

	/* Assuming 2 cluster, set clk_big_min and clk_little_max */
	clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
	clk_little_max = VIRT_FREQ(A7_CLUSTER,
				   get_table_max(freq_table[A7_CLUSTER]));

	return 0;

put_clusters:
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev)
			break;

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	atomic_dec(&cluster_usage[cluster]);

	return ret;
}

/* Per-CPU initialization */
static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
	if (ret)
		return ret;

	policy->freq_table = freq_table[cur_cluster];
	policy->cpuinfo.transition_latency = 1000000; /* 1 ms */

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) =
					clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}

static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
	return 0;
}

static struct cpufreq_driver ve_spc_cpufreq_driver = {
	.name			= "vexpress-spc",
	.flags			= CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
				  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify			= cpufreq_generic_frequency_table_verify,
	.target_index		= ve_spc_cpufreq_set_target,
	.get			= ve_spc_cpufreq_get_rate,
	.init			= ve_spc_cpufreq_init,
	.exit			= ve_spc_cpufreq_exit,
	.register_em		= cpufreq_register_em_with_opp,
	.attr			= cpufreq_generic_attr,
};

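/*
 * The driver cannot change modes while registered: unregister before a
 * switcher enable/disable transition and re-register with the new mode
 * once the transition completes.
 */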
#ifdef CONFIG_BL_SWITCHER
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
					unsigned long action, void *_arg)
{
	pr_debug("%s: action: %ld\n", __func__, action);

	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_ENABLE:
		set_switching_enabled(true);
		cpufreq_register_driver(&ve_spc_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_DISABLE:
		set_switching_enabled(false);
		cpufreq_register_driver(&ve_spc_cpufreq_driver);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block bL_switcher_notifier = {
	.notifier_call = bL_cpufreq_switcher_notifier,
};

static int __bLs_register_notifier(void)
{
	return bL_switcher_register_notifier(&bL_switcher_notifier);
}

static int __bLs_unregister_notifier(void)
{
	return bL_switcher_unregister_notifier(&bL_switcher_notifier);
}
#else
static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif

static int ve_spc_cpufreq_probe(struct platform_device *pdev)
{
	int ret, i;

	set_switching_enabled(bL_switcher_get_enabled());

	for (i = 0; i < MAX_CLUSTERS; i++)
		mutex_init(&cluster_lock[i]);

	if (!is_bL_switching_enabled())
		ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV;

	ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
	if (ret) {
		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
			__func__, ve_spc_cpufreq_driver.name, ret);
	} else {
		ret = __bLs_register_notifier();
		if (ret)
			cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
		else
			pr_info("%s: Registered platform driver: %s\n",
				__func__, ve_spc_cpufreq_driver.name);
	}

	bL_switcher_put_enabled();
	return ret;
}

static void ve_spc_cpufreq_remove(struct platform_device *pdev)
{
	bL_switcher_get_enabled();
	__bLs_unregister_notifier();
	cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
	bL_switcher_put_enabled();
	pr_info("%s: Un-registered platform driver: %s\n", __func__,
		ve_spc_cpufreq_driver.name);
}

static struct platform_driver ve_spc_cpufreq_platdrv = {
	.driver = {
		.name	= "vexpress-spc-cpufreq",
	},
	.probe		= ve_spc_cpufreq_probe,
	.remove_new	= ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);

MODULE_ALIAS("platform:vexpress-spc-cpufreq");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");