// SPDX-License-Identifier: GPL-2.0
/*
 * Versatile Express SPC CPUFreq Interface driver
 *
 * Copyright (C) 2013 - 2019 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>

/* Currently we support only two clusters */
#define A15_CLUSTER     0
#define A7_CLUSTER      1
#define MAX_CLUSTERS    2

#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled()       bL_switching_enabled
#define set_switching_enabled(x)        (bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled()       false
#define set_switching_enabled(x)        do { } while (0)
#define bL_switch_request(...)          do { } while (0)
#define bL_switcher_put_enabled()       do { } while (0)
#define bL_switcher_get_enabled()       do { } while (0)
#endif

#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)   ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
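/*
 * With the big.LITTLE switcher enabled, a single virtual frequency table is
 * exposed and the A7 cluster's operating points are advertised at half their
 * actual rate, so every virtual frequency maps to exactly one (cluster, rate)
 * pair. ACTUAL_FREQ() and VIRT_FREQ() convert between the two views.
 */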
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];

static unsigned int clk_big_min;        /* Minimum clock frequency (big) */
static unsigned int clk_little_max;     /* Maximum clock frequency (LITTLE) */

static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);

static struct mutex cluster_lock[MAX_CLUSTERS];

static inline int raw_cpu_to_cluster(int cpu)
{
        return topology_physical_package_id(cpu);
}
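/*
 * When the bL switcher is active, all CPUs are treated as one virtual
 * cluster, indexed MAX_CLUSTERS, whose merged frequency table is built in
 * merge_cluster_tables() below.
 */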
static inline int cpu_to_cluster(int cpu)
{
        return is_bL_switching_enabled() ?
                MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
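/*
 * Return the highest frequency last requested by any online CPU currently
 * executing on @cluster: the shared cluster clock must satisfy the fastest
 * of its CPUs.
 */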
static unsigned int find_cluster_maxfreq(int cluster)
{
        int j;
        u32 max_freq = 0, cpu_freq;

        for_each_online_cpu(j) {
                cpu_freq = per_cpu(cpu_last_req_freq, j);

                if (cluster == per_cpu(physical_cluster, j) &&
                    max_freq < cpu_freq)
                        max_freq = cpu_freq;
        }

        return max_freq;
}

static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
        u32 cur_cluster = per_cpu(physical_cluster, cpu);
        u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

        /* For switcher we use virtual A7 clock rates */
        if (is_bL_switching_enabled())
                rate = VIRT_FREQ(cur_cluster, rate);

        return rate;
}

static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
{
        if (is_bL_switching_enabled())
                return per_cpu(cpu_last_req_freq, cpu);
        else
                return clk_get_cpu_rate(cpu);
}
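/*
 * Request @rate (in kHz, as everywhere in cpufreq) for @cpu on @new_cluster.
 * In switcher mode the per-CPU request is recorded first and the cluster
 * clock is then set to the maximum over all CPUs on that cluster; the clk
 * API takes Hz, hence the * 1000 conversions below.
 */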
static int ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster,
                                   u32 rate)
{
        u32 new_rate, prev_rate;
        int ret;
        bool bLs = is_bL_switching_enabled();

        mutex_lock(&cluster_lock[new_cluster]);

        if (bLs) {
                prev_rate = per_cpu(cpu_last_req_freq, cpu);
                per_cpu(cpu_last_req_freq, cpu) = rate;
                per_cpu(physical_cluster, cpu) = new_cluster;

                new_rate = find_cluster_maxfreq(new_cluster);
                new_rate = ACTUAL_FREQ(new_cluster, new_rate);
        } else {
                new_rate = rate;
        }

        ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
        if (!ret) {
                /*
                 * FIXME: clk_set_rate hasn't returned an error here, however
                 * clk_change_rate may still have failed due to hardware or
                 * firmware issues and been unable to report that because of
                 * the current design of the clk core layer. To work around
                 * this problem we read back the clock rate and check that it
                 * is correct. This needs to be removed once the clk core is
                 * fixed.
                 */
                if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
                        ret = -EIO;
        }

        if (WARN_ON(ret)) {
                if (bLs) {
                        per_cpu(cpu_last_req_freq, cpu) = prev_rate;
                        per_cpu(physical_cluster, cpu) = old_cluster;
                }

                mutex_unlock(&cluster_lock[new_cluster]);

                return ret;
        }

        mutex_unlock(&cluster_lock[new_cluster]);

        /* Recalc freq for old cluster when switching clusters */
        if (old_cluster != new_cluster) {
                /* Switch cluster */
                bL_switch_request(cpu, new_cluster);

                mutex_lock(&cluster_lock[old_cluster]);

                /* Set freq of old cluster if there are cpus left on it */
                new_rate = find_cluster_maxfreq(old_cluster);
                new_rate = ACTUAL_FREQ(old_cluster, new_rate);

                if (new_rate) {
                        ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
                        if (ret)
                                pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
                                       __func__, ret, old_cluster);
                }
                mutex_unlock(&cluster_lock[old_cluster]);
        }

        return 0;
}
/*
 * Set the clock frequency for policy->cpu. In switcher mode, a virtual
 * target below clk_big_min is served by moving the CPU to the A7 cluster,
 * and one above clk_little_max by moving it to the A15 cluster.
 */
static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
                                     unsigned int index)
{
        u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
        unsigned int freqs_new;

        cur_cluster = cpu_to_cluster(cpu);
        new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

        freqs_new = freq_table[cur_cluster][index].frequency;

        if (is_bL_switching_enabled()) {
                if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
                        new_cluster = A7_CLUSTER;
                else if (actual_cluster == A7_CLUSTER &&
                         freqs_new > clk_little_max)
                        new_cluster = A15_CLUSTER;
        }

        return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
                                       freqs_new);
}

static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
        int count;

        for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
                ;

        return count;
}

/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
        struct cpufreq_frequency_table *pos;
        u32 min_freq = ~0;

        cpufreq_for_each_entry(pos, table)
                if (pos->frequency < min_freq)
                        min_freq = pos->frequency;
        return min_freq;
}

/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
        struct cpufreq_frequency_table *pos;
        u32 max_freq = 0;

        cpufreq_for_each_entry(pos, table)
                if (pos->frequency > max_freq)
                        max_freq = pos->frequency;
        return max_freq;
}

static bool search_frequency(struct cpufreq_frequency_table *table, int size,
                             unsigned int freq)
{
        int count;

        for (count = 0; count < size; count++) {
                if (table[count].frequency == freq)
                        return true;
        }

        return false;
}
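/*
 * Build the virtual cluster's table: the clusters are appended in reverse
 * order (A7 first, at virtual rates) so that frequencies come out in
 * increasing order, and A15 entries that collide with an existing virtual
 * frequency are skipped.
 */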
static int merge_cluster_tables(void)
{
        int i, j, k = 0, count = 1;
        struct cpufreq_frequency_table *table;

        for (i = 0; i < MAX_CLUSTERS; i++)
                count += get_table_count(freq_table[i]);

        table = kcalloc(count, sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        freq_table[MAX_CLUSTERS] = table;

        /* Add in reverse order to get freqs in increasing order */
        for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
                for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
                     j++) {
                        if (i == A15_CLUSTER &&
                            search_frequency(table, count, freq_table[i][j].frequency))
                                continue; /* skip duplicates */
                        table[k++].frequency =
                                VIRT_FREQ(i, freq_table[i][j].frequency);
                }
        }

        table[k].driver_data = k;
        table[k].frequency = CPUFREQ_TABLE_END;

        return 0;
}

static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
                                            const struct cpumask *cpumask)
{
        u32 cluster = raw_cpu_to_cluster(cpu_dev->id);

        if (!freq_table[cluster])
                return;

        clk_put(clk[cluster]);
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
}

static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
                                           const struct cpumask *cpumask)
{
        u32 cluster = cpu_to_cluster(cpu_dev->id);
        int i;

        if (atomic_dec_return(&cluster_usage[cluster]))
                return;

        if (cluster < MAX_CLUSTERS)
                return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);

        for_each_present_cpu(i) {
                struct device *cdev = get_cpu_device(i);

                if (!cdev)
                        return;

                _put_cluster_clk_and_freq_table(cdev, cpumask);
        }

        /* free virtual table */
        kfree(freq_table[cluster]);
}

static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
                                           const struct cpumask *cpumask)
{
        u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
        int ret;

        if (freq_table[cluster])
                return 0;

        /*
         * Platform-specific SPC code must have initialised the OPP table,
         * so just check that the OPP count is non-zero.
         */
        if (dev_pm_opp_get_opp_count(cpu_dev) <= 0) {
                ret = -ENODEV;
                goto out;
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
        if (ret)
                goto out;

        clk[cluster] = clk_get(cpu_dev, NULL);
        if (!IS_ERR(clk[cluster]))
                return 0;

        dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
                __func__, cpu_dev->id, cluster);
        ret = PTR_ERR(clk[cluster]);
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

out:
        dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
                cluster);
        return ret;
}
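/*
 * Cluster setup is reference counted: the first CPU of a cluster to come up
 * initialises the clock and frequency table, and the last one down releases
 * them. For the virtual cluster this spans every present CPU.
 */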
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
                                          const struct cpumask *cpumask)
{
        u32 cluster = cpu_to_cluster(cpu_dev->id);
        int i, ret;

        if (atomic_inc_return(&cluster_usage[cluster]) != 1)
                return 0;

        if (cluster < MAX_CLUSTERS) {
                ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
                if (ret)
                        atomic_dec(&cluster_usage[cluster]);
                return ret;
        }

        /*
         * Get data for all clusters and fill virtual cluster with a merge of
         * both.
         */
        for_each_present_cpu(i) {
                struct device *cdev = get_cpu_device(i);

                if (!cdev)
                        return -ENODEV;

                ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
                if (ret)
                        goto put_clusters;
        }

        ret = merge_cluster_tables();
        if (ret)
                goto put_clusters;

        /* Assuming 2 clusters, set clk_big_min and clk_little_max */
        clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
        clk_little_max = VIRT_FREQ(A7_CLUSTER,
                                   get_table_max(freq_table[A7_CLUSTER]));

        return 0;

put_clusters:
        for_each_present_cpu(i) {
                struct device *cdev = get_cpu_device(i);

                if (!cdev)
                        return -ENODEV;

                _put_cluster_clk_and_freq_table(cdev, cpumask);
        }

        atomic_dec(&cluster_usage[cluster]);

        return ret;
}

/* Per-CPU initialization */
static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
{
        u32 cur_cluster = cpu_to_cluster(policy->cpu);
        struct device *cpu_dev;
        int ret;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("%s: failed to get cpu%d device\n", __func__,
                       policy->cpu);
                return -ENODEV;
        }

        if (cur_cluster < MAX_CLUSTERS) {
                int cpu;

                dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);

                for_each_cpu(cpu, policy->cpus)
                        per_cpu(physical_cluster, cpu) = cur_cluster;
        } else {
                /* Assumption: during init, we are always running on A15 */
                per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
        }

        ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
        if (ret)
                return ret;

        policy->freq_table = freq_table[cur_cluster];
        policy->cpuinfo.transition_latency = 1000000; /* 1 ms */

        if (is_bL_switching_enabled())
                per_cpu(cpu_last_req_freq, policy->cpu) =
                                        clk_get_cpu_rate(policy->cpu);

        dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
        return 0;
}

static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
        struct device *cpu_dev;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("%s: failed to get cpu%d device\n", __func__,
                       policy->cpu);
                return -ENODEV;
        }

        put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
        return 0;
}

static struct cpufreq_driver ve_spc_cpufreq_driver = {
        .name           = "vexpress-spc",
        .flags          = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
                          CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = ve_spc_cpufreq_set_target,
        .get            = ve_spc_cpufreq_get_rate,
        .init           = ve_spc_cpufreq_init,
        .exit           = ve_spc_cpufreq_exit,
        .register_em    = cpufreq_register_em_with_opp,
        .attr           = cpufreq_generic_attr,
};
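/*
 * The cpufreq driver is unregistered before a switcher state change and
 * re-registered afterwards, so that the policies and frequency tables are
 * rebuilt to match the new (physical or virtual) cluster topology.
 */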
#ifdef CONFIG_BL_SWITCHER
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
                                        unsigned long action, void *_arg)
{
        pr_debug("%s: action: %ld\n", __func__, action);

        switch (action) {
        case BL_NOTIFY_PRE_ENABLE:
        case BL_NOTIFY_PRE_DISABLE:
                cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
                break;

        case BL_NOTIFY_POST_ENABLE:
                set_switching_enabled(true);
                cpufreq_register_driver(&ve_spc_cpufreq_driver);
                break;

        case BL_NOTIFY_POST_DISABLE:
                set_switching_enabled(false);
                cpufreq_register_driver(&ve_spc_cpufreq_driver);
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static struct notifier_block bL_switcher_notifier = {
        .notifier_call = bL_cpufreq_switcher_notifier,
};

static int __bLs_register_notifier(void)
{
        return bL_switcher_register_notifier(&bL_switcher_notifier);
}

static int __bLs_unregister_notifier(void)
{
        return bL_switcher_unregister_notifier(&bL_switcher_notifier);
}
#else
static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif

static int ve_spc_cpufreq_probe(struct platform_device *pdev)
{
        int ret, i;

        set_switching_enabled(bL_switcher_get_enabled());

        for (i = 0; i < MAX_CLUSTERS; i++)
                mutex_init(&cluster_lock[i]);

        if (!is_bL_switching_enabled())
                ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV;

        ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
        if (ret) {
                pr_info("%s: Failed registering platform driver: %s, err: %d\n",
                        __func__, ve_spc_cpufreq_driver.name, ret);
        } else {
                ret = __bLs_register_notifier();
                if (ret)
                        cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
                else
                        pr_info("%s: Registered platform driver: %s\n",
                                __func__, ve_spc_cpufreq_driver.name);
        }

        bL_switcher_put_enabled();
        return ret;
}

static int ve_spc_cpufreq_remove(struct platform_device *pdev)
{
        bL_switcher_get_enabled();
        __bLs_unregister_notifier();
        cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
        bL_switcher_put_enabled();
        pr_info("%s: Un-registered platform driver: %s\n", __func__,
                ve_spc_cpufreq_driver.name);
        return 0;
}

static struct platform_driver ve_spc_cpufreq_platdrv = {
        .driver = {
                .name   = "vexpress-spc-cpufreq",
        },
        .probe          = ve_spc_cpufreq_probe,
        .remove         = ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);

MODULE_ALIAS("platform:vexpress-spc-cpufreq");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");