[linux-2.6-block.git] / drivers / cpufreq / cpufreq.c — git-blame annotated listing
(viewer context line "cpufreq: arm_big_little: set 'physical_cluster' for each CPU"
is unrelated commit metadata, not part of this file; interleaved hashes/initials
below are blame annotations, not source.)
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
2f0aea93 29#include <linux/suspend.h>
5ff0a268 30#include <linux/tick.h>
6f4f2723
TR
31#include <trace/events/power.h>
32
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
/* Per-CPU pointer to the policy currently managing that CPU (NULL if none) */
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
/* Policy parked here across suspend/hotplug so it can be restored later */
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
/* Not static: shared with governor code to serialize governor state changes */
DEFINE_MUTEX(cpufreq_governor_lock);
/* List of all active policies in the system */
static LIST_HEAD(cpufreq_policy_list);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
1da177e4 50
9c0ebcf7
VK
51static inline bool has_target(void)
52{
53 return cpufreq_driver->target_index || cpufreq_driver->target;
54}
55
6eed9404
VK
56/*
57 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
58 * sections
59 */
60static DECLARE_RWSEM(cpufreq_rwsem);
61
1da177e4 62/* internal prototypes */
29464f28
DJ
63static int __cpufreq_governor(struct cpufreq_policy *policy,
64 unsigned int event);
5a01f2e8 65static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 66static void handle_update(struct work_struct *work);
1da177e4
LT
67
68/**
32ee8c3e
DJ
69 * Two notifier lists: the "policy" list is involved in the
70 * validation process for a new CPU frequency policy; the
1da177e4
LT
71 * "transition" list for kernel code that needs to handle
72 * changes to devices when the CPU clock speed changes.
73 * The mutex locks both lists.
74 */
/* "policy" chain: consulted while a new policy is validated */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
/* "transition" chain: notified on every frequency change; SRCU, init'd below */
static struct srcu_notifier_head cpufreq_transition_notifier_list;

/* Guards against registering on the SRCU chain before it is initialized */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: must run before any notifier registration attempts */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 86
/* Non-zero once disable_cpufreq() has run; queried via cpufreq_disabled() */
static int off __read_mostly;
/* Returns non-zero when the cpufreq core is administratively disabled */
static int cpufreq_disabled(void)
{
	return off;
}
/* Permanently disable the cpufreq core; there is no re-enable path */
void disable_cpufreq(void)
{
	off = 1;
}
/* Registered governors, protected by cpufreq_governor_mutex */
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 98
4d5dcc42
VK
99bool have_governor_per_policy(void)
100{
0b981e70 101 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 102}
3f869d6d 103EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 104
944e9a03
VK
105struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
106{
107 if (have_governor_per_policy())
108 return &policy->kobj;
109 else
110 return cpufreq_global_kobject;
111}
112EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
113
72a4ce34
VK
114static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
115{
116 u64 idle_time;
117 u64 cur_wall_time;
118 u64 busy_time;
119
120 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
121
122 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
128
129 idle_time = cur_wall_time - busy_time;
130 if (wall)
131 *wall = cputime_to_usecs(cur_wall_time);
132
133 return cputime_to_usecs(idle_time);
134}
135
136u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
137{
138 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
139
140 if (idle_time == -1ULL)
141 return get_cpu_idle_time_jiffy(cpu, wall);
142 else if (!io_busy)
143 idle_time += get_cpu_iowait_time_us(cpu, wall);
144
145 return idle_time;
146}
147EXPORT_SYMBOL_GPL(get_cpu_idle_time);
148
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do following:
 * - validate & show freq table passed
 * - set policies transition latency
 * - policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
179
652ed95d
VK
180unsigned int cpufreq_generic_get(unsigned int cpu)
181{
182 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
183
184 if (!policy || IS_ERR(policy->clk)) {
e837f9b5
JP
185 pr_err("%s: No %s associated to cpu: %d\n",
186 __func__, policy ? "clk" : "policy", cpu);
652ed95d
VK
187 return 0;
188 }
189
190 return clk_get_rate(policy->clk) / 1000;
191}
192EXPORT_SYMBOL_GPL(cpufreq_generic_get);
193
/* Only for cpufreq core internal use */
/* Lockless lookup of @cpu's policy; no reference is taken on the result. */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return per_cpu(cpufreq_cpu_data, cpu);
}
199
/*
 * cpufreq_cpu_get - get a refcounted policy for @cpu.
 *
 * On success the policy's kobject reference is held AND cpufreq_rwsem is
 * held for reading (released by cpufreq_cpu_put()), which keeps the driver
 * module from unloading while the policy is in use. Returns NULL when
 * cpufreq is disabled, @cpu is invalid, or no policy exists.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
		return NULL;

	/* trylock: a writer here means the driver is (un)registering */
	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* no policy found: drop the rwsem we would otherwise hand to caller */
	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
229
/*
 * cpufreq_cpu_put - release a policy obtained via cpufreq_cpu_get():
 * drops the kobject reference and the read-held cpufreq_rwsem.
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	if (cpufreq_disabled())
		return;

	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
239
1da177e4
LT
240/*********************************************************************
241 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
242 *********************************************************************/
243
244/**
245 * adjust_jiffies - adjust the system "loops_per_jiffy"
246 *
247 * This function alters the system "loops_per_jiffy" for the clock
248 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 249 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
250 * per-CPU loops_per_jiffy value wherever possible.
251 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency at which it was recorded */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Delay loops are frequency-invariant on this hardware: nothing to do */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* First call: capture the current lpj/frequency pair as reference */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale lpj after a completed change, or around suspend/resume */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is maintained by architecture code instead */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
281
/*
 * Notify listeners about one CPU's frequency transition and keep the
 * core's cached frequency (policy->cur) in sync. Called once with
 * CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE per affected CPU.
 * Must run in process context (notifiers may sleep).
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				/* trust the core's value over the driver's */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* only the policy-owning CPU updates the cached frequency */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
bb176f7d 326
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 *
 * Note: freqs->cpu is overwritten as the loop iterates over every CPU in
 * the policy; callers must not rely on its value afterwards.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
342
f7ba3b41
VK
343/* Do post notifications when there are chances that transition has failed */
344void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
345 struct cpufreq_freqs *freqs, int transition_failed)
346{
347 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
348 if (!transition_failed)
349 return;
350
351 swap(freqs->old, freqs->new);
352 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
353 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
354}
355EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
356
1da177e4 357
1da177e4
LT
358/*********************************************************************
359 * SYSFS INTERFACE *
360 *********************************************************************/
/* sysfs: read the global boost-enabled state ("0"/"1") */
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

/* sysfs: enable/disable boost frequencies; accepts only 0 or 1 */
static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
1da177e4 388
3bcb09a3
JF
389static struct cpufreq_governor *__find_governor(const char *str_governor)
390{
391 struct cpufreq_governor *t;
392
393 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 394 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
395 return t;
396
397 return NULL;
398}
399
1da177e4
LT
400/**
401 * cpufreq_parse_governor - parse a governor string
402 */
905d77cd 403static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
404 struct cpufreq_governor **governor)
405{
3bcb09a3 406 int err = -EINVAL;
1c3d85dd
RW
407
408 if (!cpufreq_driver)
3bcb09a3
JF
409 goto out;
410
1c3d85dd 411 if (cpufreq_driver->setpolicy) {
1da177e4
LT
412 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
413 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 414 err = 0;
e08f5f5b
GS
415 } else if (!strnicmp(str_governor, "powersave",
416 CPUFREQ_NAME_LEN)) {
1da177e4 417 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 418 err = 0;
1da177e4 419 }
9c0ebcf7 420 } else if (has_target()) {
1da177e4 421 struct cpufreq_governor *t;
3bcb09a3 422
3fc54d37 423 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
424
425 t = __find_governor(str_governor);
426
ea714970 427 if (t == NULL) {
1a8e1463 428 int ret;
ea714970 429
1a8e1463
KC
430 mutex_unlock(&cpufreq_governor_mutex);
431 ret = request_module("cpufreq_%s", str_governor);
432 mutex_lock(&cpufreq_governor_mutex);
ea714970 433
1a8e1463
KC
434 if (ret == 0)
435 t = __find_governor(str_governor);
ea714970
JF
436 }
437
3bcb09a3
JF
438 if (t != NULL) {
439 *governor = t;
440 err = 0;
1da177e4 441 }
3bcb09a3 442
3fc54d37 443 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 444 }
29464f28 445out:
3bcb09a3 446 return err;
1da177e4 447}
1da177e4 448
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

/* Generates a sysfs show handler that prints one policy member */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

/* Forward declaration: defined later, used by the store_one() handlers */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
/*
 * Generates a sysfs store handler: parses an unsigned int, applies it via
 * cpufreq_set_policy() and records the user's request in user_policy.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
500
501/**
502 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
503 */
905d77cd
DJ
504static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
505 char *buf)
1da177e4 506{
5a01f2e8 507 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
508 if (!cur_freq)
509 return sprintf(buf, "<unknown>");
510 return sprintf(buf, "%u\n", cur_freq);
511}
512
1da177e4
LT
513/**
514 * show_scaling_governor - show the current policy for the specified CPU
515 */
905d77cd 516static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 517{
29464f28 518 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
519 return sprintf(buf, "powersave\n");
520 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
521 return sprintf(buf, "performance\n");
522 else if (policy->governor)
4b972f0b 523 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 524 policy->governor->name);
1da177e4
LT
525 return -EINVAL;
526}
527
1da177e4
LT
528/**
529 * store_scaling_governor - store policy for the specified CPU
530 */
905d77cd
DJ
531static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
532 const char *buf, size_t count)
1da177e4 533{
5136fa56 534 int ret;
1da177e4
LT
535 char str_governor[16];
536 struct cpufreq_policy new_policy;
537
538 ret = cpufreq_get_policy(&new_policy, policy->cpu);
539 if (ret)
540 return ret;
541
29464f28 542 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
543 if (ret != 1)
544 return -EINVAL;
545
e08f5f5b
GS
546 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
547 &new_policy.governor))
1da177e4
LT
548 return -EINVAL;
549
037ce839 550 ret = cpufreq_set_policy(policy, &new_policy);
7970e08b
TR
551
552 policy->user_policy.policy = policy->policy;
553 policy->user_policy.governor = policy->governor;
7970e08b 554
e08f5f5b
GS
555 if (ret)
556 return ret;
557 else
558 return count;
1da177e4
LT
559}
560
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
568
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	/* ->setpolicy drivers only offer the two built-in policies */
	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop before a name + separator could overflow the page */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 593
/*
 * cpufreq_show_cpus - format @mask as a space-separated, newline-terminated
 * list of CPU numbers into the sysfs page @buf. Returns bytes written.
 */
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		/* keep room for the trailing "\n\0" */
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 610
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
627
9e76988e 628static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 629 const char *buf, size_t count)
9e76988e
VP
630{
631 unsigned int freq = 0;
632 unsigned int ret;
633
879000f9 634 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
635 return -EINVAL;
636
637 ret = sscanf(buf, "%u", &freq);
638 if (ret != 1)
639 return -EINVAL;
640
641 policy->governor->store_setspeed(policy, freq);
642
643 return count;
644}
645
/* sysfs: report the governor's set speed, or "<unsupported>" if N/A */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 653
e2f74f35 654/**
8bf1ac72 655 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
656 */
657static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
658{
659 unsigned int limit;
660 int ret;
1c3d85dd
RW
661 if (cpufreq_driver->bios_limit) {
662 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
663 if (!ret)
664 return sprintf(buf, "%u\n", limit);
665 }
666 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
667}
668
/* Per-policy sysfs attribute definitions (cpuinfo_cur_freq is root-only) */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

/* Attributes created for every policy kobject by default */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
698
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

/*
 * Common sysfs read path: pins the driver module (cpufreq_rwsem), takes
 * the policy rwsem for reading and dispatches to the attribute's ->show.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	/* trylock: a writer means the driver is being (un)registered */
	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
723
/*
 * Common sysfs write path: additionally pins CPU hotplug state
 * (get_online_cpus) and rejects writes for offline CPUs; takes the policy
 * rwsem for writing before dispatching to the attribute's ->store.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
754
/* kobject release: signals waiters that the policy kobj is truly gone */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

/* kobject type for per-policy sysfs directories (cpufreq/) */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
772
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Usage counter: the kobject is added on first use, removed on last put */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	/* first user: attach /sys/devices/system/cpu/cpufreq */
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);
787
/* Drop one usage; removes the global kobject when the count hits zero */
void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);
794
795int cpufreq_sysfs_create_file(const struct attribute *attr)
796{
797 int ret = cpufreq_get_global_kobject();
798
799 if (!ret) {
800 ret = sysfs_create_file(cpufreq_global_kobject, attr);
801 if (ret)
802 cpufreq_put_global_kobject();
803 }
804
805 return ret;
806}
807EXPORT_SYMBOL(cpufreq_sysfs_create_file);
808
/* Remove @attr from the global kobject and drop the matching usage ref */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
815
/* symlink affected CPUs */
/*
 * For every CPU in the policy except the owner, create a "cpufreq" symlink
 * from that CPU's device directory to the shared policy kobject. Stops at
 * the first failure and returns its error code (partial links remain).
 */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		/* the owner CPU hosts the real directory, not a link */
		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
837
/*
 * Register the policy's sysfs interface under @dev: init the kobject,
 * create driver-specific and conditional attributes, then symlink the
 * sibling CPUs. On failure the kobject is released and its unregistration
 * is awaited before returning the error.
 */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only makes sense when the driver can read HW */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (has_target()) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
885
886static void cpufreq_init_policy(struct cpufreq_policy *policy)
887{
6e2c89d1 888 struct cpufreq_governor *gov = NULL;
e18f1682
SB
889 struct cpufreq_policy new_policy;
890 int ret = 0;
891
d5b73cd8 892 memcpy(&new_policy, policy, sizeof(*policy));
a27a9ab7 893
6e2c89d1 894 /* Update governor of new_policy to the governor used before hotplug */
895 gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
896 if (gov)
897 pr_debug("Restoring governor %s for cpu %d\n",
898 policy->governor->name, policy->cpu);
899 else
900 gov = CPUFREQ_DEFAULT_GOVERNOR;
901
902 new_policy.governor = gov;
903
a27a9ab7
JB
904 /* Use the default policy if its valid. */
905 if (cpufreq_driver->setpolicy)
6e2c89d1 906 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
ecf7e461
DJ
907
908 /* set default policy */
037ce839 909 ret = cpufreq_set_policy(policy, &new_policy);
ecf7e461 910 if (ret) {
2d06d8c4 911 pr_debug("setting policy failed\n");
1c3d85dd
RW
912 if (cpufreq_driver->exit)
913 cpufreq_driver->exit(policy);
ecf7e461 914 }
909a694e
DJ
915}
916
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach a hotplugged @cpu to an existing @policy: stop the governor,
 * add the CPU to the policy's mask and per-CPU pointer under the driver
 * lock, restart the governor, then create the CPU's "cpufreq" symlink.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
		    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
1da177e4 953
8414809c
SB
954static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
955{
956 struct cpufreq_policy *policy;
957 unsigned long flags;
958
44871c9c 959 read_lock_irqsave(&cpufreq_driver_lock, flags);
8414809c
SB
960
961 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
962
44871c9c 963 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
8414809c 964
6e2c89d1 965 policy->governor = NULL;
966
8414809c
SB
967 return policy;
968}
969
/*
 * Allocate and minimally initialize a policy: zeroed struct, cpus and
 * related_cpus masks, list head and rwsem. Returns NULL on allocation
 * failure (partially allocated resources are released).
 */
static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
996
/*
 * cpufreq_policy_put_kobj - drop the policy kobject and wait for release
 * @policy: policy whose sysfs representation is being torn down
 *
 * Notifies CPUFREQ_REMOVE_POLICY listeners, drops the kobject reference
 * and blocks until the release callback completes policy->kobj_unregister,
 * after which the policy memory may safely be freed by the caller.
 */
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	/* Snapshot both pointers under the rwsem before dropping the ref. */
	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
1020
e9698cc5
SB
1021static void cpufreq_policy_free(struct cpufreq_policy *policy)
1022{
1023 free_cpumask_var(policy->related_cpus);
1024 free_cpumask_var(policy->cpus);
1025 kfree(policy);
1026}
1027
/*
 * update_policy_cpu - change the CPU that owns/represents @policy
 * @policy: policy being updated
 * @cpu:    new owner CPU
 *
 * Records the previous owner in policy->last_cpu, switches policy->cpu
 * under the policy rwsem, and informs CPUFREQ_UPDATE_POLICY_CPU
 * listeners about the move. A no-op (with a WARN) if @cpu already owns
 * the policy.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	if (WARN_ON(cpu == policy->cpu))
		return;

	down_write(&policy->rwsem);

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	up_write(&policy->rwsem);

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1043
96bbbe4a 1044static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 1045{
fcf80582 1046 unsigned int j, cpu = dev->id;
65922465 1047 int ret = -ENOMEM;
1da177e4 1048 struct cpufreq_policy *policy;
1da177e4 1049 unsigned long flags;
96bbbe4a 1050 bool recover_policy = cpufreq_suspended;
90e41bac 1051#ifdef CONFIG_HOTPLUG_CPU
1b274294 1052 struct cpufreq_policy *tpolicy;
90e41bac 1053#endif
1da177e4 1054
c32b6b8e
AR
1055 if (cpu_is_offline(cpu))
1056 return 0;
1057
2d06d8c4 1058 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
1059
1060#ifdef CONFIG_SMP
1061 /* check whether a different CPU already registered this
1062 * CPU because it is in the same boat. */
1063 policy = cpufreq_cpu_get(cpu);
1064 if (unlikely(policy)) {
8ff69732 1065 cpufreq_cpu_put(policy);
1da177e4
LT
1066 return 0;
1067 }
5025d628 1068#endif
fcf80582 1069
6eed9404
VK
1070 if (!down_read_trylock(&cpufreq_rwsem))
1071 return 0;
1072
fcf80582
VK
1073#ifdef CONFIG_HOTPLUG_CPU
1074 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 1075 read_lock_irqsave(&cpufreq_driver_lock, flags);
1b274294
VK
1076 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1077 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
0d1857a1 1078 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
42f921a6 1079 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
6eed9404
VK
1080 up_read(&cpufreq_rwsem);
1081 return ret;
2eaa3e2d 1082 }
fcf80582 1083 }
0d1857a1 1084 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1085#endif
1086
72368d12
RW
1087 /*
1088 * Restore the saved policy when doing light-weight init and fall back
1089 * to the full init if that fails.
1090 */
96bbbe4a 1091 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
72368d12 1092 if (!policy) {
96bbbe4a 1093 recover_policy = false;
8414809c 1094 policy = cpufreq_policy_alloc();
72368d12
RW
1095 if (!policy)
1096 goto nomem_out;
1097 }
0d66b91e
SB
1098
1099 /*
1100 * In the resume path, since we restore a saved policy, the assignment
1101 * to policy->cpu is like an update of the existing policy, rather than
1102 * the creation of a brand new one. So we need to perform this update
1103 * by invoking update_policy_cpu().
1104 */
96bbbe4a 1105 if (recover_policy && cpu != policy->cpu)
0d66b91e
SB
1106 update_policy_cpu(policy, cpu);
1107 else
1108 policy->cpu = cpu;
1109
835481d9 1110 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1111
1da177e4 1112 init_completion(&policy->kobj_unregister);
65f27f38 1113 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
1114
1115 /* call driver. From then on the cpufreq must be able
1116 * to accept all calls to ->verify and ->setpolicy for this CPU
1117 */
1c3d85dd 1118 ret = cpufreq_driver->init(policy);
1da177e4 1119 if (ret) {
2d06d8c4 1120 pr_debug("initialization failed\n");
2eaa3e2d 1121 goto err_set_policy_cpu;
1da177e4 1122 }
643ae6e8 1123
5a7e56a5
VK
1124 /* related cpus should atleast have policy->cpus */
1125 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1126
1127 /*
1128 * affected cpus must always be the one, which are online. We aren't
1129 * managing offline cpus here.
1130 */
1131 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1132
96bbbe4a 1133 if (!recover_policy) {
5a7e56a5
VK
1134 policy->user_policy.min = policy->min;
1135 policy->user_policy.max = policy->max;
1136 }
1137
4e97b631 1138 down_write(&policy->rwsem);
652ed95d
VK
1139 write_lock_irqsave(&cpufreq_driver_lock, flags);
1140 for_each_cpu(j, policy->cpus)
1141 per_cpu(cpufreq_cpu_data, j) = policy;
1142 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1143
2ed99e39 1144 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
da60ce9f
VK
1145 policy->cur = cpufreq_driver->get(policy->cpu);
1146 if (!policy->cur) {
1147 pr_err("%s: ->get() failed\n", __func__);
1148 goto err_get_freq;
1149 }
1150 }
1151
d3916691
VK
1152 /*
1153 * Sometimes boot loaders set CPU frequency to a value outside of
1154 * frequency table present with cpufreq core. In such cases CPU might be
1155 * unstable if it has to run on that frequency for long duration of time
1156 * and so its better to set it to a frequency which is specified in
1157 * freq-table. This also makes cpufreq stats inconsistent as
1158 * cpufreq-stats would fail to register because current frequency of CPU
1159 * isn't found in freq-table.
1160 *
1161 * Because we don't want this change to effect boot process badly, we go
1162 * for the next freq which is >= policy->cur ('cur' must be set by now,
1163 * otherwise we will end up setting freq to lowest of the table as 'cur'
1164 * is initialized to zero).
1165 *
1166 * We are passing target-freq as "policy->cur - 1" otherwise
1167 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1168 * equal to target-freq.
1169 */
1170 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1171 && has_target()) {
1172 /* Are we running at unknown frequency ? */
1173 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1174 if (ret == -EINVAL) {
1175 /* Warn user and fix it */
1176 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1177 __func__, policy->cpu, policy->cur);
1178 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1179 CPUFREQ_RELATION_L);
1180
1181 /*
1182 * Reaching here after boot in a few seconds may not
1183 * mean that system will remain stable at "unknown"
1184 * frequency for longer duration. Hence, a BUG_ON().
1185 */
1186 BUG_ON(ret);
1187 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1188 __func__, policy->cpu, policy->cur);
1189 }
1190 }
1191
a1531acd
TR
1192 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1193 CPUFREQ_START, policy);
1194
96bbbe4a 1195 if (!recover_policy) {
308b60e7 1196 ret = cpufreq_add_dev_interface(policy, dev);
a82fab29
SB
1197 if (ret)
1198 goto err_out_unregister;
fcd7af91
VK
1199 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1200 CPUFREQ_CREATE_POLICY, policy);
a82fab29 1201 }
8ff69732 1202
9515f4d6
VK
1203 write_lock_irqsave(&cpufreq_driver_lock, flags);
1204 list_add(&policy->policy_list, &cpufreq_policy_list);
1205 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1206
e18f1682
SB
1207 cpufreq_init_policy(policy);
1208
96bbbe4a 1209 if (!recover_policy) {
08fd8c1c
VK
1210 policy->user_policy.policy = policy->policy;
1211 policy->user_policy.governor = policy->governor;
1212 }
4e97b631 1213 up_write(&policy->rwsem);
08fd8c1c 1214
038c5b3e 1215 kobject_uevent(&policy->kobj, KOBJ_ADD);
6eed9404
VK
1216 up_read(&cpufreq_rwsem);
1217
2d06d8c4 1218 pr_debug("initialization complete\n");
87c32271 1219
1da177e4
LT
1220 return 0;
1221
1da177e4 1222err_out_unregister:
652ed95d 1223err_get_freq:
0d1857a1 1224 write_lock_irqsave(&cpufreq_driver_lock, flags);
474deff7 1225 for_each_cpu(j, policy->cpus)
7a6aedfa 1226 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 1227 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1228
da60ce9f
VK
1229 if (cpufreq_driver->exit)
1230 cpufreq_driver->exit(policy);
2eaa3e2d 1231err_set_policy_cpu:
96bbbe4a 1232 if (recover_policy) {
72368d12
RW
1233 /* Do not leave stale fallback data behind. */
1234 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
42f921a6 1235 cpufreq_policy_put_kobj(policy);
72368d12 1236 }
e9698cc5 1237 cpufreq_policy_free(policy);
42f921a6 1238
1da177e4 1239nomem_out:
6eed9404
VK
1240 up_read(&cpufreq_rwsem);
1241
1da177e4
LT
1242 return ret;
1243}
1244
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	/* Thin wrapper: all the work happens in __cpufreq_add_dev(). */
	return __cpufreq_add_dev(dev, sif);
}
1258
/*
 * cpufreq_nominate_new_policy_cpu - pick a new owner for @policy's sysfs dir
 * @policy:  policy whose current owner (@old_cpu) is going away
 * @old_cpu: CPU that currently hosts the policy kobject
 *
 * Moves the policy kobject into the sysfs directory of another CPU from
 * policy->cpus and returns that CPU's id. If kobject_move() fails,
 * @old_cpu is put back into policy->cpus, a plain symlink is recreated
 * as a best effort, and -EINVAL is returned.
 */
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu)
{
	struct device *cpu_dev;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

	/* The sibling had a symlink; replace it with the real directory. */
	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);

		down_write(&policy->rwsem);
		cpumask_set_cpu(old_cpu, policy->cpus);
		up_write(&policy->rwsem);

		/* Best effort restore of the link; result intentionally
		 * overwritten - the function reports -EINVAL regardless. */
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}
1285
/*
 * __cpufreq_remove_dev_prepare - first stage of removing a CPU's cpufreq i/f
 * @dev: CPU device being removed
 * @sif: subsys interface (unused)
 *
 * Stops the governor, remembers the governor name for a later re-plug,
 * and, when the departing CPU owns the policy kobject while siblings
 * remain, migrates the kobject to a sibling. During suspend the policy
 * is additionally stashed in the per-cpu fallback slot for the
 * light-weight re-init on resume. Returns 0 or a negative error code.
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (cpufreq_suspended)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	/* Remember the governor so it can be restored when the CPU returns.
	 * NOTE(review): strncpy may leave the buffer unterminated if the
	 * name is exactly CPUFREQ_NAME_LEN bytes - confirm with readers. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		/* Non-owner CPU: only its symlink needs to go. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Owner leaves but siblings remain: move the kobject over. */
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!cpufreq_suspended) {
				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
					 __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}
1343
/*
 * __cpufreq_remove_dev_finish - second stage of removing a CPU's cpufreq i/f
 * @dev: CPU device being removed
 * @sif: subsys interface (unused)
 *
 * Drops @dev's CPU from policy->cpus. If it was the last CPU: exit the
 * governor, run the driver's ->exit(), unlink the policy from the global
 * list and (outside suspend) free it. Otherwise restart the governor for
 * the remaining CPUs. Finally clears the per-cpu policy pointer.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		/* Keep the kobject across suspend for the resume re-init. */
		if (!cpufreq_suspended)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!cpufreq_suspended)
			cpufreq_policy_free(policy);
	} else {
		if (has_target()) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
				       __func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
1412
cedb70af 1413/**
27a862e9 1414 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1415 *
1416 * Removes the cpufreq interface for a CPU device.
cedb70af 1417 */
8a25a2fd 1418static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1419{
8a25a2fd 1420 unsigned int cpu = dev->id;
27a862e9 1421 int ret;
ec28297a
VP
1422
1423 if (cpu_is_offline(cpu))
1424 return 0;
1425
96bbbe4a 1426 ret = __cpufreq_remove_dev_prepare(dev, sif);
27a862e9
VK
1427
1428 if (!ret)
96bbbe4a 1429 ret = __cpufreq_remove_dev_finish(dev, sif);
27a862e9
VK
1430
1431 return ret;
5a01f2e8
VP
1432}
1433
65f27f38 1434static void handle_update(struct work_struct *work)
1da177e4 1435{
65f27f38
DH
1436 struct cpufreq_policy *policy =
1437 container_of(work, struct cpufreq_policy, update);
1438 unsigned int cpu = policy->cpu;
2d06d8c4 1439 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1440 cpufreq_update_policy(cpu);
1441}
1442
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Fake a transition so governors and listeners resync to new_freq. */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
1473
32ee8c3e 1474/**
4ab70df4 1475 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1476 * @cpu: CPU number
1477 *
1478 * This is the last known freq, without actually getting it from the driver.
1479 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1480 */
1481unsigned int cpufreq_quick_get(unsigned int cpu)
1482{
9e21ba8b 1483 struct cpufreq_policy *policy;
e08f5f5b 1484 unsigned int ret_freq = 0;
95235ca2 1485
1c3d85dd
RW
1486 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1487 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1488
1489 policy = cpufreq_cpu_get(cpu);
95235ca2 1490 if (policy) {
e08f5f5b 1491 ret_freq = policy->cur;
95235ca2
VP
1492 cpufreq_cpu_put(policy);
1493 }
1494
4d34a67d 1495 return ret_freq;
95235ca2
VP
1496}
1497EXPORT_SYMBOL(cpufreq_quick_get);
1498
3d737108
JB
1499/**
1500 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1501 * @cpu: CPU number
1502 *
1503 * Just return the max possible frequency for a given CPU.
1504 */
1505unsigned int cpufreq_quick_get_max(unsigned int cpu)
1506{
1507 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1508 unsigned int ret_freq = 0;
1509
1510 if (policy) {
1511 ret_freq = policy->max;
1512 cpufreq_cpu_put(policy);
1513 }
1514
1515 return ret_freq;
1516}
1517EXPORT_SYMBOL(cpufreq_quick_get_max);
1518
/*
 * __cpufreq_get - query the driver for @cpu's current frequency
 *
 * Caller (cpufreq_get()) holds policy->rwsem and guarantees a policy
 * exists for @cpu. If the hardware value disagrees with policy->cur on
 * a driver without CPUFREQ_CONST_LOOPS, resynchronize via
 * cpufreq_out_of_sync() and schedule a deferred policy update.
 * Returns 0 when the driver has no ->get().
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1541
5a01f2e8
VP
1542/**
1543 * cpufreq_get - get the current CPU frequency (in kHz)
1544 * @cpu: CPU number
1545 *
1546 * Get the CPU current (static) CPU frequency
1547 */
1548unsigned int cpufreq_get(unsigned int cpu)
1549{
999976e0 1550 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
5a01f2e8 1551 unsigned int ret_freq = 0;
5a01f2e8 1552
999976e0
AP
1553 if (policy) {
1554 down_read(&policy->rwsem);
1555 ret_freq = __cpufreq_get(cpu);
1556 up_read(&policy->rwsem);
5a01f2e8 1557
999976e0
AP
1558 cpufreq_cpu_put(policy);
1559 }
6eed9404 1560
4d34a67d 1561 return ret_freq;
1da177e4
LT
1562}
1563EXPORT_SYMBOL(cpufreq_get);
1564
/* Glue between the cpufreq core and the cpu subsystem: these callbacks
 * run for every CPU device as it is added to / removed from the bus. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1571
/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 *
 * Generic ->suspend() helper for drivers: moves the policy to
 * policy->suspend_freq (which the driver must have set; zero is
 * rejected with -EINVAL). Returns the __cpufreq_driver_target() result.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_err("%s: suspend_freq can't be zero\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
1597
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in suspend cycle.
 * Because some of the devices (like: i2c, regulators, etc) they use for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target())
		return;

	pr_debug("%s: Suspending Governors\n", __func__);

	/*
	 * NOTE(review): cpufreq_policy_list is walked without taking
	 * cpufreq_driver_lock - presumably hotplug is already quiesced at
	 * this point in the suspend sequence; confirm.
	 */
	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
			pr_err("%s: Failed to stop governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->suspend
		    && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

	/* From here on __cpufreq_governor() short-circuits to a no-op. */
	cpufreq_suspended = true;
}
1630
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	/* Clear the flag first so __cpufreq_governor() works again. */
	cpufreq_suspended = false;

	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->resume
		    && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);

		/*
		 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
		 * policy in list. It will verify that the current freq is in
		 * sync with what we believe it to be.
		 */
		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
			schedule_work(&policy->update);
	}
}
1da177e4 1670
9d95046e
BP
1671/**
1672 * cpufreq_get_current_driver - return current driver's name
1673 *
1674 * Return the name string of the currently loaded cpufreq driver
1675 * or NULL, if none.
1676 */
1677const char *cpufreq_get_current_driver(void)
1678{
1c3d85dd
RW
1679 if (cpufreq_driver)
1680 return cpufreq_driver->name;
1681
1682 return NULL;
9d95046e
BP
1683}
1684EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1685
1686/*********************************************************************
1687 * NOTIFIER LISTS INTERFACE *
1688 *********************************************************************/
1689
/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	/* Transition chain must be initialised before registration. */
	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
1728
1da177e4
LT
1729/**
1730 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1731 * @nb: notifier block to be unregistered
bb176f7d 1732 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1733 *
1734 * Remove a driver from the CPU frequency notifier list.
1735 *
1736 * This function may sleep, and has the same return conditions as
e041c683 1737 * blocking_notifier_chain_unregister.
1da177e4
LT
1738 */
1739int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1740{
1741 int ret;
1742
d5aaffa9
DB
1743 if (cpufreq_disabled())
1744 return -EINVAL;
1745
1da177e4
LT
1746 switch (list) {
1747 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1748 ret = srcu_notifier_chain_unregister(
e041c683 1749 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1750 break;
1751 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1752 ret = blocking_notifier_chain_unregister(
1753 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1754 break;
1755 default:
1756 ret = -EINVAL;
1757 }
1da177e4
LT
1758
1759 return ret;
1760}
1761EXPORT_SYMBOL(cpufreq_unregister_notifier);
1762
1763
1764/*********************************************************************
1765 * GOVERNORS *
1766 *********************************************************************/
1767
/*
 * __cpufreq_driver_target - drive @policy towards a new target frequency
 * @policy:      policy to act on (caller holds policy->rwsem)
 * @target_freq: requested frequency in kHz, clamped to policy min/max
 * @relation:    CPUFREQ_RELATION_* selection rule
 *
 * Dispatches to the driver's ->target(), or for table-based drivers
 * resolves a table index and calls ->target_index(), issuing
 * PRE/POSTCHANGE notifications unless the driver declares
 * CPUFREQ_ASYNC_NOTIFICATION and handles them itself.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		struct cpufreq_freqs freqs;
		bool notify;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		/* Resolved index maps back to the current freq: nothing to do. */
		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);

		if (notify) {
			freqs.old = policy->cur;
			freqs.new = freq_table[index].frequency;
			freqs.flags = 0;

			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
				 __func__, policy->cpu, freqs.old, freqs.new);

			cpufreq_notify_transition(policy, &freqs,
					CPUFREQ_PRECHANGE);
		}

		retval = cpufreq_driver->target_index(policy, index);
		if (retval)
			pr_err("%s: Failed to change cpu frequency: %d\n",
			       __func__, retval);

		/* POSTCHANGE completion (or rollback when retval != 0). */
		if (notify)
			cpufreq_notify_post_transition(policy, &freqs, retval);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1849
1da177e4
LT
1850int cpufreq_driver_target(struct cpufreq_policy *policy,
1851 unsigned int target_freq,
1852 unsigned int relation)
1853{
f1829e4a 1854 int ret = -EINVAL;
1da177e4 1855
ad7722da 1856 down_write(&policy->rwsem);
1da177e4
LT
1857
1858 ret = __cpufreq_driver_target(policy, target_freq, relation);
1859
ad7722da 1860 up_write(&policy->rwsem);
1da177e4 1861
1da177e4
LT
1862 return ret;
1863}
1864EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1865
/*
 * __cpufreq_governor - forward a governor event to policy->governor
 * @policy: policy the event applies to
 * @event:  CPUFREQ_GOV_{POLICY_INIT,POLICY_EXIT,START,STOP,LIMITS}
 *          (governors re-evaluate limits when "event" is CPUFREQ_GOV_LIMITS)
 *
 * Events are silently dropped while the system is suspending.
 * governor_enabled bookkeeping, protected by cpufreq_governor_lock,
 * rejects redundant START/STOP transitions with -EBUSY and is rolled
 * back if the governor callback fails.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/* Fall back to performance when HW latency exceeds the governor's
	 * limit (or fail outright if no fallback governor was built in). */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module for the lifetime of this policy binding. */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop the module ref on failed INIT or successful EXIT. */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
1946
1da177e4
LT
1947int cpufreq_register_governor(struct cpufreq_governor *governor)
1948{
3bcb09a3 1949 int err;
1da177e4
LT
1950
1951 if (!governor)
1952 return -EINVAL;
1953
a7b422cd
KRW
1954 if (cpufreq_disabled())
1955 return -ENODEV;
1956
3fc54d37 1957 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1958
b394058f 1959 governor->initialized = 0;
3bcb09a3
JF
1960 err = -EBUSY;
1961 if (__find_governor(governor->name) == NULL) {
1962 err = 0;
1963 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1964 }
1da177e4 1965
32ee8c3e 1966 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1967 return err;
1da177e4
LT
1968}
1969EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1970
1da177e4
LT
1971void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1972{
90e41bac 1973 int cpu;
90e41bac 1974
1da177e4
LT
1975 if (!governor)
1976 return;
1977
a7b422cd
KRW
1978 if (cpufreq_disabled())
1979 return;
1980
90e41bac
PB
1981 for_each_present_cpu(cpu) {
1982 if (cpu_online(cpu))
1983 continue;
1984 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1985 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1986 }
90e41bac 1987
3fc54d37 1988 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1989 list_del(&governor->governor_list);
3fc54d37 1990 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1991 return;
1992}
1993EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1994
1995
1da177e4
LT
1996/*********************************************************************
1997 * POLICY INTERFACE *
1998 *********************************************************************/
1999
2000/**
2001 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
2002 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2003 * is written
1da177e4
LT
2004 *
2005 * Reads the current cpufreq policy.
2006 */
2007int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2008{
2009 struct cpufreq_policy *cpu_policy;
2010 if (!policy)
2011 return -EINVAL;
2012
2013 cpu_policy = cpufreq_cpu_get(cpu);
2014 if (!cpu_policy)
2015 return -EINVAL;
2016
d5b73cd8 2017 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
2018
2019 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
2020 return 0;
2021}
2022EXPORT_SYMBOL(cpufreq_get_policy);
2023
153d7f3f 2024/*
037ce839
VK
2025 * policy : current policy.
2026 * new_policy: policy to be set.
153d7f3f 2027 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	/* cpuinfo is immutable hardware data; copy it from the live policy. */
	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* The new range must overlap the current one. */
	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	/* setpolicy-style drivers manage frequency themselves; just hand
	 * them the new policy and we are done. */
	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	/* Same governor as before: only the limits changed. */
	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		/* NOTE(review): the rwsem is dropped around POLICY_EXIT,
		 * presumably to avoid deadlock with governor sysfs
		 * teardown - confirm. */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy,CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		/* START failed: tear the new governor back down. */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
2119
1da177e4
LT
2120/**
2121 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2122 * @cpu: CPU which shall be re-evaluated
2123 *
25985edc 2124 * Useful for policy notifiers which have different necessities
1da177e4
LT
2125 * at different times.
2126 */
2127int cpufreq_update_policy(unsigned int cpu)
2128{
3a3e9e06
VK
2129 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2130 struct cpufreq_policy new_policy;
f1829e4a 2131 int ret;
1da177e4 2132
3a3e9e06 2133 if (!policy) {
f1829e4a
JL
2134 ret = -ENODEV;
2135 goto no_policy;
2136 }
1da177e4 2137
ad7722da 2138 down_write(&policy->rwsem);
1da177e4 2139
2d06d8c4 2140 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 2141 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
2142 new_policy.min = policy->user_policy.min;
2143 new_policy.max = policy->user_policy.max;
2144 new_policy.policy = policy->user_policy.policy;
2145 new_policy.governor = policy->user_policy.governor;
1da177e4 2146
bb176f7d
VK
2147 /*
2148 * BIOS might change freq behind our back
2149 * -> ask driver for current freq and notify governors about a change
2150 */
2ed99e39 2151 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
3a3e9e06 2152 new_policy.cur = cpufreq_driver->get(cpu);
bd0fa9bb
VK
2153 if (WARN_ON(!new_policy.cur)) {
2154 ret = -EIO;
2155 goto no_policy;
2156 }
2157
3a3e9e06 2158 if (!policy->cur) {
e837f9b5 2159 pr_debug("Driver did not initialize current freq\n");
3a3e9e06 2160 policy->cur = new_policy.cur;
a85f7bd3 2161 } else {
9c0ebcf7 2162 if (policy->cur != new_policy.cur && has_target())
3a3e9e06
VK
2163 cpufreq_out_of_sync(cpu, policy->cur,
2164 new_policy.cur);
a85f7bd3 2165 }
0961dd0d
TR
2166 }
2167
037ce839 2168 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2169
ad7722da 2170 up_write(&policy->rwsem);
5a01f2e8 2171
3a3e9e06 2172 cpufreq_cpu_put(policy);
f1829e4a 2173no_policy:
1da177e4
LT
2174 return ret;
2175}
2176EXPORT_SYMBOL(cpufreq_update_policy);
2177
/*
 * cpufreq_cpu_callback - CPU hotplug notifier: keep cpufreq state in sync
 * with CPUs coming and going.
 */
static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		/* Strip CPU_TASKS_FROZEN so suspend/resume transitions are
		 * handled the same way as plain hotplug events. */
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			__cpufreq_add_dev(dev, NULL);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL);
			break;

		case CPU_POST_DEAD:
			/* Teardown is finished only after the CPU is dead. */
			__cpufreq_remove_dev_finish(dev, NULL);
			break;

		case CPU_DOWN_FAILED:
			/* Offlining was aborted - restore the device. */
			__cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}
2206
/* Hotplug notifier block; registered in cpufreq_register_driver(). */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4 2210
6f19efc0
LM
2211/*********************************************************************
2212 * BOOST *
2213 *********************************************************************/
/*
 * cpufreq_boost_set_sw - software fallback for toggling boost frequencies.
 * @state: non-zero to enable boost, zero to disable.
 *
 * Re-derives every policy's frequency limits from its frequency table and
 * notifies the governors of the new limits.  Used as the default
 * ->set_boost when a driver declares boost support but provides no
 * hardware-specific handler.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): @state itself is not read here; presumably the table
 * helper honours the already-updated global boost_enabled flag - confirm.
 */
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			/* Recompute cpuinfo min/max from the table. */
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}
2237
/*
 * cpufreq_boost_trigger_state - switch boost support on or off.
 * @state: non-zero to enable boost, zero to disable.
 *
 * Updates cpufreq_driver->boost_enabled and asks the driver to apply the
 * new state; the flag is rolled back if the driver refuses.  Returns 0 on
 * success or the driver's error code.
 */
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	/* Nothing to do if we are already in the requested state. */
	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* NOTE(review): ->set_boost is called outside cpufreq_driver_lock,
	 * presumably because it may sleep - confirm. */
	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		/* Driver rejected the change - roll the flag back. */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
2262
2263int cpufreq_boost_supported(void)
2264{
2265 if (likely(cpufreq_driver))
2266 return cpufreq_driver->boost_supported;
2267
2268 return 0;
2269}
2270EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2271
2272int cpufreq_boost_enabled(void)
2273{
2274 return cpufreq_driver->boost_enabled;
2275}
2276EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2277
1da177e4
LT
2278/*********************************************************************
2279 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2280 *********************************************************************/
2281
2282/**
2283 * cpufreq_register_driver - register a CPU Frequency driver
2284 * @driver_data: A struct cpufreq_driver containing the values#
2285 * submitted by the CPU Frequency driver.
2286 *
bb176f7d 2287 * Registers a CPU Frequency driver to this core code. This code
1da177e4 2288 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 2289 * (and isn't unregistered in the meantime).
1da177e4
LT
2290 *
2291 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A usable driver must provide verify + init and at least one of
	 * the setpolicy/target_index/target callbacks. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* setpolicy drivers keep loops_per_jiffy constant. */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Only one cpufreq driver may be registered at a time. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	/* Registers every CPU device; the driver's ->init() runs from the
	 * subsystem's add_dev callback. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	/* Non-sticky drivers are rejected unless at least one CPU was
	 * successfully initialized. */
	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
				 driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
	/* Error unwind: undo the steps above in reverse order. */
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
2372EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2373
1da177e4
LT
2374/**
2375 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2376 *
bb176f7d 2377 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2378 * the right to do so, i.e. if you have succeeded in initialising before!
2379 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2380 * currently not initialised.
2381 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Only the currently registered driver may be removed. */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* NOTE(review): cpufreq_rwsem is held for writing while the driver
	 * pointer is cleared, presumably to wait out in-flight readers of
	 * cpufreq_driver - confirm ordering vs cpufreq_driver_lock. */
	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
2407EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2408
2409static int __init cpufreq_core_init(void)
2410{
a7b422cd
KRW
2411 if (cpufreq_disabled())
2412 return -ENODEV;
2413
2361be23 2414 cpufreq_global_kobject = kobject_create();
8aa84ad8
TR
2415 BUG_ON(!cpufreq_global_kobject);
2416
5a01f2e8
VP
2417 return 0;
2418}
5a01f2e8 2419core_initcall(cpufreq_core_init);